xref: /openbmc/qemu/linux-user/syscall.c (revision 5ae3ec63)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61 #include <linux/wireless.h>
62 #include <linux/icmp.h>
63 #include <linux/icmpv6.h>
64 #include <linux/if_tun.h>
65 #include <linux/in6.h>
66 #include <linux/errqueue.h>
67 #include <linux/random.h>
68 #ifdef CONFIG_TIMERFD
69 #include <sys/timerfd.h>
70 #endif
71 #ifdef CONFIG_EVENTFD
72 #include <sys/eventfd.h>
73 #endif
74 #ifdef CONFIG_EPOLL
75 #include <sys/epoll.h>
76 #endif
77 #ifdef CONFIG_ATTR
78 #include "qemu/xattr.h"
79 #endif
80 #ifdef CONFIG_SENDFILE
81 #include <sys/sendfile.h>
82 #endif
83 #ifdef HAVE_SYS_KCOV_H
84 #include <sys/kcov.h>
85 #endif
86 
87 #define termios host_termios
88 #define winsize host_winsize
89 #define termio host_termio
90 #define sgttyb host_sgttyb /* same as target */
91 #define tchars host_tchars /* same as target */
92 #define ltchars host_ltchars /* same as target */
93 
94 #include <linux/termios.h>
95 #include <linux/unistd.h>
96 #include <linux/cdrom.h>
97 #include <linux/hdreg.h>
98 #include <linux/soundcard.h>
99 #include <linux/kd.h>
100 #include <linux/mtio.h>
101 #include <linux/fs.h>
102 #include <linux/fd.h>
103 #if defined(CONFIG_FIEMAP)
104 #include <linux/fiemap.h>
105 #endif
106 #include <linux/fb.h>
107 #if defined(CONFIG_USBFS)
108 #include <linux/usbdevice_fs.h>
109 #include <linux/usb/ch9.h>
110 #endif
111 #include <linux/vt.h>
112 #include <linux/dm-ioctl.h>
113 #include <linux/reboot.h>
114 #include <linux/route.h>
115 #include <linux/filter.h>
116 #include <linux/blkpg.h>
117 #include <netpacket/packet.h>
118 #include <linux/netlink.h>
119 #include <linux/if_alg.h>
120 #include <linux/rtc.h>
121 #include <sound/asound.h>
122 #ifdef HAVE_BTRFS_H
123 #include <linux/btrfs.h>
124 #endif
125 #ifdef HAVE_DRM_H
126 #include <libdrm/drm.h>
127 #include <libdrm/i915_drm.h>
128 #endif
129 #include "linux_loop.h"
130 #include "uname.h"
131 
132 #include "qemu.h"
133 #include "user-internals.h"
134 #include "strace.h"
135 #include "signal-common.h"
136 #include "loader.h"
137 #include "user-mmap.h"
138 #include "user/safe-syscall.h"
139 #include "qemu/guest-random.h"
140 #include "qemu/selfmap.h"
141 #include "user/syscall-trace.h"
142 #include "special-errno.h"
143 #include "qapi/error.h"
144 #include "fd-trans.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
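
/*
 * Illustrative example of the classification (hedged, not exhaustive):
 * glibc's pthread_create() typically passes
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * i.e. all of CLONE_THREAD_FLAGS plus bits only from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it satisfies the "all thread flags
 * present" rule above.  A fork()-style clone passes none of the thread
 * flags and no bits from CLONE_INVALID_FORK_FLAGS.
 */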
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
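
/*
 * For example, the later instantiation
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * expands to
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 * i.e. a thin wrapper that issues the raw host syscall, bypassing any
 * glibc wrapper for that call.
 */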
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we implement the guest's getdents via the host's own getdents, if it has one.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
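
/*
 * Concretely: a 64-bit guest (TARGET_ABI_BITS == 64) on a 32-bit host
 * (HOST_LONG_BITS == 32) fails the check above, so its getdents is
 * emulated via sys_getdents64(); otherwise the host's own getdents is
 * used directly, when the host provides it.
 */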
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
425 static const bitmask_transtbl fcntl_flags_tbl[] = {
426   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
427   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
428   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
429   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
430   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
431   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
432   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
433   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
434   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
435   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
436   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
437   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
438   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
439 #if defined(O_DIRECT)
440   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
441 #endif
442 #if defined(O_NOATIME)
443   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
444 #endif
445 #if defined(O_CLOEXEC)
446   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
447 #endif
448 #if defined(O_PATH)
449   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
450 #endif
451 #if defined(O_TMPFILE)
452   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
453 #endif
454   /* Don't terminate the list prematurely on 64-bit host+guest.  */
455 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
456   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
457 #endif
458 };
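
/*
 * This table is consumed by the generic bitmask translation helpers
 * (target_to_host_bitmask()/host_to_target_bitmask()); for instance a
 * guest open() flag word has its TARGET_O_* bits rewritten to the host's
 * O_* values before being handed to the host openat().
 */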
459 
460 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461 
462 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463 #if defined(__NR_utimensat)
464 #define __NR_sys_utimensat __NR_utimensat
465 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466           const struct timespec *,tsp,int,flags)
467 #else
468 static int sys_utimensat(int dirfd, const char *pathname,
469                          const struct timespec times[2], int flags)
470 {
471     errno = ENOSYS;
472     return -1;
473 }
474 #endif
475 #endif /* TARGET_NR_utimensat */
476 
477 #ifdef TARGET_NR_renameat2
478 #if defined(__NR_renameat2)
479 #define __NR_sys_renameat2 __NR_renameat2
480 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481           const char *, new, unsigned int, flags)
482 #else
483 static int sys_renameat2(int oldfd, const char *old,
484                          int newfd, const char *new, int flags)
485 {
486     if (flags == 0) {
487         return renameat(oldfd, old, newfd, new);
488     }
489     errno = ENOSYS;
490     return -1;
491 }
492 #endif
493 #endif /* TARGET_NR_renameat2 */
494 
495 #ifdef CONFIG_INOTIFY
496 #include <sys/inotify.h>
497 #else
498 /* Userspace can usually survive runtime without inotify */
499 #undef TARGET_NR_inotify_init
500 #undef TARGET_NR_inotify_init1
501 #undef TARGET_NR_inotify_add_watch
502 #undef TARGET_NR_inotify_rm_watch
503 #endif /* CONFIG_INOTIFY  */
504 
505 #if defined(TARGET_NR_prlimit64)
506 #ifndef __NR_prlimit64
507 # define __NR_prlimit64 -1
508 #endif
509 #define __NR_sys_prlimit64 __NR_prlimit64
510 /* The glibc rlimit structure may not match the one used by the underlying syscall */
511 struct host_rlimit64 {
512     uint64_t rlim_cur;
513     uint64_t rlim_max;
514 };
515 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516           const struct host_rlimit64 *, new_limit,
517           struct host_rlimit64 *, old_limit)
518 #endif
519 
520 
521 #if defined(TARGET_NR_timer_create)
522 /* Maximum of 32 active POSIX timers allowed at any one time. */
523 #define GUEST_TIMER_MAX 32
524 static timer_t g_posix_timers[GUEST_TIMER_MAX];
525 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526 
527 static inline int next_free_host_timer(void)
528 {
529     int k;
530     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532             return k;
533         }
534     }
535     return -1;
536 }
537 
538 static inline void free_host_timer_slot(int id)
539 {
540     qatomic_store_release(g_posix_timer_allocated + id, 0);
541 }
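
/*
 * Note on the allocation scheme: qatomic_xchg() atomically claims a slot
 * (an old value of 0 means it was free), and qatomic_store_release()
 * publishes the slot as free again, so guest threads can create and
 * delete timers concurrently without an additional lock.
 */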
542 #endif
543 
544 static inline int host_to_target_errno(int host_errno)
545 {
546     switch (host_errno) {
547 #define E(X)  case X: return TARGET_##X;
548 #include "errnos.c.inc"
549 #undef E
550     default:
551         return host_errno;
552     }
553 }
554 
555 static inline int target_to_host_errno(int target_errno)
556 {
557     switch (target_errno) {
558 #define E(X)  case TARGET_##X: return X;
559 #include "errnos.c.inc"
560 #undef E
561     default:
562         return target_errno;
563     }
564 }
565 
566 abi_long get_errno(abi_long ret)
567 {
568     if (ret == -1)
569         return -host_to_target_errno(errno);
570     else
571         return ret;
572 }
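
/*
 * Typical calling pattern elsewhere in this file (sketch):
 *     ret = get_errno(safe_read(fd, p, count));
 * so host failures come back as negative TARGET_E* values and successful
 * results pass through unchanged.
 */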
573 
574 const char *target_strerror(int err)
575 {
576     if (err == QEMU_ERESTARTSYS) {
577         return "To be restarted";
578     }
579     if (err == QEMU_ESIGRETURN) {
580         return "Successful exit from sigreturn";
581     }
582 
583     return strerror(target_to_host_errno(err));
584 }
585 
586 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587 {
588     int i;
589     uint8_t b;
590     if (usize <= ksize) {
591         return 1;
592     }
593     for (i = ksize; i < usize; i++) {
594         if (get_user_u8(b, addr + i)) {
595             return -TARGET_EFAULT;
596         }
597         if (b != 0) {
598             return 0;
599         }
600     }
601     return 1;
602 }
603 
604 /*
605  * Copies a target struct to a host struct, in a way that guarantees
606  * backwards-compatibility for struct syscall arguments.
607  *
608  * Similar to the kernel's uaccess.h:copy_struct_from_user()
609  */
610 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
611 {
612     size_t size = MIN(ksize, usize);
613     size_t rest = MAX(ksize, usize) - size;
614 
615     /* Deal with trailing bytes. */
616     if (usize < ksize) {
617         memset(dst + size, 0, rest);
618     } else if (usize > ksize) {
619         int ret = check_zeroed_user(src, ksize, usize);
620         if (ret <= 0) {
621             return ret ?: -TARGET_E2BIG;
622         }
623     }
624     /* Copy the interoperable parts of the struct. */
625     if (copy_from_user(dst, src, size)) {
626         return -TARGET_EFAULT;
627     }
628     return 0;
629 }
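
/*
 * Usage sketch (illustrative; names are placeholders), assuming a handler
 * for a versioned/extensible struct such as sched_setattr():
 *
 *     struct sched_attr attr = { 0 };
 *     int err = copy_struct_from_user(&attr, sizeof(attr), target_addr, usize);
 *     if (err) {
 *         return err;
 *     }
 *
 * A smaller guest struct (usize < ksize) has its missing tail zeroed; a
 * larger one is accepted only if the extra bytes are all zero, otherwise
 * the call fails with -TARGET_E2BIG (or -TARGET_EFAULT on a bad pointer).
 */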
630 
631 #define safe_syscall0(type, name) \
632 static type safe_##name(void) \
633 { \
634     return safe_syscall(__NR_##name); \
635 }
636 
637 #define safe_syscall1(type, name, type1, arg1) \
638 static type safe_##name(type1 arg1) \
639 { \
640     return safe_syscall(__NR_##name, arg1); \
641 }
642 
643 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
644 static type safe_##name(type1 arg1, type2 arg2) \
645 { \
646     return safe_syscall(__NR_##name, arg1, arg2); \
647 }
648 
649 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
650 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
651 { \
652     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
653 }
654 
655 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
656     type4, arg4) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
660 }
661 
662 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4, type5, arg5) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
665     type5 arg5) \
666 { \
667     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
668 }
669 
670 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
671     type4, arg4, type5, arg5, type6, arg6) \
672 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
673     type5 arg5, type6 arg6) \
674 { \
675     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
676 }
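
/*
 * For example, the safe_syscall3(ssize_t, read, ...) instantiation below
 * generates safe_read(fd, buff, count), which issues the host syscall via
 * safe_syscall() so that a guest signal arriving just before the call
 * would block makes it fail with QEMU_ERESTARTSYS and be restarted,
 * rather than delaying signal delivery until the syscall returns.
 */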
677 
678 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
679 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
680 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
681               int, flags, mode_t, mode)
682 
683 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
684               const struct open_how_ver0 *, how, size_t, size)
685 
686 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
687 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
688               struct rusage *, rusage)
689 #endif
690 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
691               int, options, struct rusage *, rusage)
692 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
693 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
694               char **, argv, char **, envp, int, flags)
695 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
696     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
697 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
698               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
699 #endif
700 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
701 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
702               struct timespec *, tsp, const sigset_t *, sigmask,
703               size_t, sigsetsize)
704 #endif
705 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
706               int, maxevents, int, timeout, const sigset_t *, sigmask,
707               size_t, sigsetsize)
708 #if defined(__NR_futex)
709 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
710               const struct timespec *,timeout,int *,uaddr2,int,val3)
711 #endif
712 #if defined(__NR_futex_time64)
713 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
714               const struct timespec *,timeout,int *,uaddr2,int,val3)
715 #endif
716 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
717 safe_syscall2(int, kill, pid_t, pid, int, sig)
718 safe_syscall2(int, tkill, int, tid, int, sig)
719 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
720 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
721 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
722 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
723               unsigned long, pos_l, unsigned long, pos_h)
724 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
725               unsigned long, pos_l, unsigned long, pos_h)
726 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
727               socklen_t, addrlen)
728 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
729               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
730 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
731               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
732 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
733 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
734 safe_syscall2(int, flock, int, fd, int, operation)
735 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
736 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
737               const struct timespec *, uts, size_t, sigsetsize)
738 #endif
739 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
740               int, flags)
741 #if defined(TARGET_NR_nanosleep)
742 safe_syscall2(int, nanosleep, const struct timespec *, req,
743               struct timespec *, rem)
744 #endif
745 #if defined(TARGET_NR_clock_nanosleep) || \
746     defined(TARGET_NR_clock_nanosleep_time64)
747 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
748               const struct timespec *, req, struct timespec *, rem)
749 #endif
750 #ifdef __NR_ipc
751 #ifdef __s390x__
752 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
753               void *, ptr)
754 #else
755 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
756               void *, ptr, long, fifth)
757 #endif
758 #endif
759 #ifdef __NR_msgsnd
760 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
761               int, flags)
762 #endif
763 #ifdef __NR_msgrcv
764 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
765               long, msgtype, int, flags)
766 #endif
767 #ifdef __NR_semtimedop
768 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
769               unsigned, nsops, const struct timespec *, timeout)
770 #endif
771 #if defined(TARGET_NR_mq_timedsend) || \
772     defined(TARGET_NR_mq_timedsend_time64)
773 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
774               size_t, len, unsigned, prio, const struct timespec *, timeout)
775 #endif
776 #if defined(TARGET_NR_mq_timedreceive) || \
777     defined(TARGET_NR_mq_timedreceive_time64)
778 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
779               size_t, len, unsigned *, prio, const struct timespec *, timeout)
780 #endif
781 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
782 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
783               int, outfd, loff_t *, poutoff, size_t, length,
784               unsigned int, flags)
785 #endif
786 
787 /* We do ioctl like this rather than via safe_syscall3 to preserve the
788  * "third argument might be integer or pointer or not present" behaviour of
789  * the libc function.
790  */
791 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
792 /* Similarly for fcntl. Since we always build with LFS enabled,
793  * we should be using the 64-bit structures automatically.
794  */
795 #ifdef __NR_fcntl64
796 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
797 #else
798 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
799 #endif
800 
801 static inline int host_to_target_sock_type(int host_type)
802 {
803     int target_type;
804 
805     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
806     case SOCK_DGRAM:
807         target_type = TARGET_SOCK_DGRAM;
808         break;
809     case SOCK_STREAM:
810         target_type = TARGET_SOCK_STREAM;
811         break;
812     default:
813         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
814         break;
815     }
816 
817 #if defined(SOCK_CLOEXEC)
818     if (host_type & SOCK_CLOEXEC) {
819         target_type |= TARGET_SOCK_CLOEXEC;
820     }
821 #endif
822 
823 #if defined(SOCK_NONBLOCK)
824     if (host_type & SOCK_NONBLOCK) {
825         target_type |= TARGET_SOCK_NONBLOCK;
826     }
827 #endif
828 
829     return target_type;
830 }
831 
832 static abi_ulong target_brk, initial_target_brk;
833 
834 void target_set_brk(abi_ulong new_brk)
835 {
836     target_brk = TARGET_PAGE_ALIGN(new_brk);
837     initial_target_brk = target_brk;
838 }
839 
840 /* do_brk() must return target values and target errnos. */
841 abi_long do_brk(abi_ulong brk_val)
842 {
843     abi_long mapped_addr;
844     abi_ulong new_brk;
845     abi_ulong old_brk;
846 
847     /* brk pointers are always untagged */
848 
849     /* do not allow the break to shrink below its initial value */
850     if (brk_val < initial_target_brk) {
851         return target_brk;
852     }
853 
854     new_brk = TARGET_PAGE_ALIGN(brk_val);
855     old_brk = TARGET_PAGE_ALIGN(target_brk);
856 
857     /* new and old target_brk might be on the same page */
858     if (new_brk == old_brk) {
859         target_brk = brk_val;
860         return target_brk;
861     }
862 
863     /* Release heap if necessary */
864     if (new_brk < old_brk) {
865         target_munmap(new_brk, old_brk - new_brk);
866 
867         target_brk = brk_val;
868         return target_brk;
869     }
870 
871     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
872                               PROT_READ | PROT_WRITE,
873                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
874                               -1, 0);
875 
876     if (mapped_addr == old_brk) {
877         target_brk = brk_val;
878         return target_brk;
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
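
/*
 * Note: growing the break maps fresh anonymous pages with
 * MAP_FIXED_NOREPLACE, so if something else already occupies the range
 * (e.g. a guest mmap() landed just above the heap) the mapping fails and
 * the guest (aside from the Alpha/OSF-1 case handled above) simply sees
 * its old break value back, matching how the kernel's brk reports failure.
 */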
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
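
/*
 * Worked example of the layout handled above: the guest fd_set is an
 * array of abi_ulong words with fd k stored in bit (k % TARGET_ABI_BITS)
 * of word (k / TARGET_ABI_BITS); for a 32-bit guest, fd 35 therefore
 * lives in bit 3 of word 1.
 */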
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
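
/*
 * Example of the scaling: on an Alpha host (HOST_HZ == 1024), reporting
 * 2048 host ticks to a guest with TARGET_HZ == 100 yields
 * (2048 * 100) / 1024 = 200 guest clock ticks.
 */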
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
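
/*
 * In both directions a limit that cannot be represented on the other
 * side is saturated to the respective RLIM_INFINITY; e.g. a 64-bit host
 * limit wider than a 32-bit guest's abi_ulong is reported to the guest
 * as TARGET_RLIM_INFINITY rather than being silently truncated.
 */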
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32-bit mode, this drops the padding bits */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457         ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589         struct timespec ts, *pts;
1590 
1591         if (arg3 >= 0) {
1592             /* Convert milliseconds to seconds and nanoseconds. */
1593             ts.tv_sec = arg3 / 1000;
1594             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595             pts = &ts;
1596         } else {
1597             /* A negative poll() timeout means "infinite". */
1598             pts = NULL;
1599         }
1600         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
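     /* For example, on Alpha the historical pipe() returns the read end in
      * the normal return-value register and the write end in a second
      * register (a4), so nothing is written to guest memory at all; the
      * register assignments below reproduce that convention for each
      * affected target.
      */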
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
1646 
1647 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1648                                                abi_ulong target_addr,
1649                                                socklen_t len)
1650 {
1651     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1652     sa_family_t sa_family;
1653     struct target_sockaddr *target_saddr;
1654 
1655     if (fd_trans_target_to_host_addr(fd)) {
1656         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1657     }
1658 
1659     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_saddr)
1661         return -TARGET_EFAULT;
1662 
1663     sa_family = tswap16(target_saddr->sa_family);
1664 
1665     /* Oops. The caller might send an incomplete sun_path; sun_path
1666      * must be terminated by \0 (see the manual page), but
1667      * unfortunately it is quite common to specify the sockaddr_un
1668      * length as "strlen(x->sun_path)" when it should be
1669      * "strlen(...) + 1". We fix that up here if needed.
1670      * The Linux kernel applies the same fix-up.
1671      */
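     /* For instance (illustrative values): a guest that passes
      * len == offsetof(struct sockaddr_un, sun_path) + strlen(x->sun_path)
      * omits the trailing NUL; when the byte just past that length is
      * already zero, the code below grows len by one so the host kernel
      * sees a properly terminated sun_path.
      */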
1672 
1673     if (sa_family == AF_UNIX) {
1674         if (len < unix_maxlen && len > 0) {
1675             char *cp = (char *)target_saddr;
1676 
1677             if (cp[len - 1] && !cp[len])
1678                 len++;
1679         }
1680         if (len > unix_maxlen)
1681             len = unix_maxlen;
1682     }
1683 
1684     memcpy(addr, target_saddr, len);
1685     addr->sa_family = sa_family;
1686     if (sa_family == AF_NETLINK) {
1687         struct sockaddr_nl *nladdr;
1688 
1689         nladdr = (struct sockaddr_nl *)addr;
1690         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1691         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1692     } else if (sa_family == AF_PACKET) {
1693         struct target_sockaddr_ll *lladdr;
1694 
1695         lladdr = (struct target_sockaddr_ll *)addr;
1696         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1697         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1698     } else if (sa_family == AF_INET6) {
1699         struct sockaddr_in6 *in6addr;
1700 
1701         in6addr = (struct sockaddr_in6 *)addr;
1702         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1703     }
1704     unlock_user(target_saddr, target_addr, 0);
1705 
1706     return 0;
1707 }
1708 
1709 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1710                                                struct sockaddr *addr,
1711                                                socklen_t len)
1712 {
1713     struct target_sockaddr *target_saddr;
1714 
1715     if (len == 0) {
1716         return 0;
1717     }
1718     assert(addr);
1719 
1720     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1721     if (!target_saddr)
1722         return -TARGET_EFAULT;
1723     memcpy(target_saddr, addr, len);
1724     if (len >= offsetof(struct target_sockaddr, sa_family) +
1725         sizeof(target_saddr->sa_family)) {
1726         target_saddr->sa_family = tswap16(addr->sa_family);
1727     }
1728     if (addr->sa_family == AF_NETLINK &&
1729         len >= sizeof(struct target_sockaddr_nl)) {
1730         struct target_sockaddr_nl *target_nl =
1731                (struct target_sockaddr_nl *)target_saddr;
1732         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1733         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1734     } else if (addr->sa_family == AF_PACKET) {
1735         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1736         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1737         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1738     } else if (addr->sa_family == AF_INET6 &&
1739                len >= sizeof(struct target_sockaddr_in6)) {
1740         struct target_sockaddr_in6 *target_in6 =
1741                (struct target_sockaddr_in6 *)target_saddr;
1742         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1743     }
1744     unlock_user(target_saddr, target_addr, len);
1745 
1746     return 0;
1747 }
1748 
1749 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1750                                            struct target_msghdr *target_msgh)
1751 {
1752     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1753     abi_long msg_controllen;
1754     abi_ulong target_cmsg_addr;
1755     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1756     socklen_t space = 0;
1757 
1758     msg_controllen = tswapal(target_msgh->msg_controllen);
1759     if (msg_controllen < sizeof (struct target_cmsghdr))
1760         goto the_end;
1761     target_cmsg_addr = tswapal(target_msgh->msg_control);
1762     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1763     target_cmsg_start = target_cmsg;
1764     if (!target_cmsg)
1765         return -TARGET_EFAULT;
1766 
1767     while (cmsg && target_cmsg) {
1768         void *data = CMSG_DATA(cmsg);
1769         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1770 
1771         int len = tswapal(target_cmsg->cmsg_len)
1772             - sizeof(struct target_cmsghdr);
1773 
1774         space += CMSG_SPACE(len);
1775         if (space > msgh->msg_controllen) {
1776             space -= CMSG_SPACE(len);
1777             /* This is a QEMU bug, since we allocated the payload
1778              * area ourselves (unlike overflow in host-to-target
1779              * conversion, which is just the guest giving us a buffer
1780              * that's too small). It can't happen for the payload types
1781              * we currently support; if it becomes an issue in future
1782              * we would need to improve our allocation strategy to
1783              * something more intelligent than "twice the size of the
1784              * target buffer we're reading from".
1785              */
1786             qemu_log_mask(LOG_UNIMP,
1787                           ("Unsupported ancillary data %d/%d: "
1788                            "unhandled msg size\n"),
1789                           tswap32(target_cmsg->cmsg_level),
1790                           tswap32(target_cmsg->cmsg_type));
1791             break;
1792         }
1793 
1794         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1795             cmsg->cmsg_level = SOL_SOCKET;
1796         } else {
1797             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1798         }
1799         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1800         cmsg->cmsg_len = CMSG_LEN(len);
1801 
1802         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1803             int *fd = (int *)data;
1804             int *target_fd = (int *)target_data;
1805             int i, numfds = len / sizeof(int);
1806 
1807             for (i = 0; i < numfds; i++) {
1808                 __get_user(fd[i], target_fd + i);
1809             }
1810         } else if (cmsg->cmsg_level == SOL_SOCKET
1811                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1812             struct ucred *cred = (struct ucred *)data;
1813             struct target_ucred *target_cred =
1814                 (struct target_ucred *)target_data;
1815 
1816             __get_user(cred->pid, &target_cred->pid);
1817             __get_user(cred->uid, &target_cred->uid);
1818             __get_user(cred->gid, &target_cred->gid);
1819         } else if (cmsg->cmsg_level == SOL_ALG) {
1820             uint32_t *dst = (uint32_t *)data;
1821 
1822             memcpy(dst, target_data, len);
1823             /* fix endianness of first 32-bit word */
1824             if (len >= sizeof(uint32_t)) {
1825                 *dst = tswap32(*dst);
1826             }
1827         } else {
1828             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1829                           cmsg->cmsg_level, cmsg->cmsg_type);
1830             memcpy(data, target_data, len);
1831         }
1832 
1833         cmsg = CMSG_NXTHDR(msgh, cmsg);
1834         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1835                                          target_cmsg_start);
1836     }
1837     unlock_user(target_cmsg, target_cmsg_addr, 0);
1838  the_end:
1839     msgh->msg_controllen = space;
1840     return 0;
1841 }
1842 
1843 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1844                                            struct msghdr *msgh)
1845 {
1846     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1847     abi_long msg_controllen;
1848     abi_ulong target_cmsg_addr;
1849     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1850     socklen_t space = 0;
1851 
1852     msg_controllen = tswapal(target_msgh->msg_controllen);
1853     if (msg_controllen < sizeof (struct target_cmsghdr))
1854         goto the_end;
1855     target_cmsg_addr = tswapal(target_msgh->msg_control);
1856     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1857     target_cmsg_start = target_cmsg;
1858     if (!target_cmsg)
1859         return -TARGET_EFAULT;
1860 
1861     while (cmsg && target_cmsg) {
1862         void *data = CMSG_DATA(cmsg);
1863         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1864 
1865         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1866         int tgt_len, tgt_space;
1867 
1868         /* We never copy a half-header but may copy half-data;
1869          * this is Linux's behaviour in put_cmsg(). Note that
1870          * truncation here is a guest problem (which we report
1871          * to the guest via the CTRUNC bit), unlike truncation
1872          * in target_to_host_cmsg, which is a QEMU bug.
1873          */
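         /* Concretely: once fewer than sizeof(struct target_cmsghdr) bytes
          * of control space remain, the check below stops emitting headers
          * entirely and only raises MSG_CTRUNC.
          */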
1874         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1875             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1876             break;
1877         }
1878 
1879         if (cmsg->cmsg_level == SOL_SOCKET) {
1880             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1881         } else {
1882             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1883         }
1884         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1885 
1886         /* Payload types which need a different size of payload on
1887          * the target must adjust tgt_len here.
1888          */
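         /* SO_TIMESTAMP below is the canonical example: the host side
          * carries a struct timeval laid out for the host ABI, while the
          * guest expects a struct target_timeval, so tgt_len can differ
          * from len.
          */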
1889         tgt_len = len;
1890         switch (cmsg->cmsg_level) {
1891         case SOL_SOCKET:
1892             switch (cmsg->cmsg_type) {
1893             case SO_TIMESTAMP:
1894                 tgt_len = sizeof(struct target_timeval);
1895                 break;
1896             default:
1897                 break;
1898             }
1899             break;
1900         default:
1901             break;
1902         }
1903 
1904         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1905             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1906             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1907         }
1908 
1909         /* We must now copy-and-convert len bytes of payload
1910          * into tgt_len bytes of destination space. Bear in mind
1911          * that in both source and destination we may be dealing
1912          * with a truncated value!
1913          */
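         /* For example, a truncated SCM_RIGHTS message copies out only the
          * file descriptors that still fit in tgt_len, and the guest learns
          * about the loss through MSG_CTRUNC.
          */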
1914         switch (cmsg->cmsg_level) {
1915         case SOL_SOCKET:
1916             switch (cmsg->cmsg_type) {
1917             case SCM_RIGHTS:
1918             {
1919                 int *fd = (int *)data;
1920                 int *target_fd = (int *)target_data;
1921                 int i, numfds = tgt_len / sizeof(int);
1922 
1923                 for (i = 0; i < numfds; i++) {
1924                     __put_user(fd[i], target_fd + i);
1925                 }
1926                 break;
1927             }
1928             case SO_TIMESTAMP:
1929             {
1930                 struct timeval *tv = (struct timeval *)data;
1931                 struct target_timeval *target_tv =
1932                     (struct target_timeval *)target_data;
1933 
1934                 if (len != sizeof(struct timeval) ||
1935                     tgt_len != sizeof(struct target_timeval)) {
1936                     goto unimplemented;
1937                 }
1938 
1939                 /* copy struct timeval to target */
1940                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1941                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1942                 break;
1943             }
1944             case SCM_CREDENTIALS:
1945             {
1946                 struct ucred *cred = (struct ucred *)data;
1947                 struct target_ucred *target_cred =
1948                     (struct target_ucred *)target_data;
1949 
1950                 __put_user(cred->pid, &target_cred->pid);
1951                 __put_user(cred->uid, &target_cred->uid);
1952                 __put_user(cred->gid, &target_cred->gid);
1953                 break;
1954             }
1955             default:
1956                 goto unimplemented;
1957             }
1958             break;
1959 
1960         case SOL_IP:
1961             switch (cmsg->cmsg_type) {
1962             case IP_TTL:
1963             {
1964                 uint32_t *v = (uint32_t *)data;
1965                 uint32_t *t_int = (uint32_t *)target_data;
1966 
1967                 if (len != sizeof(uint32_t) ||
1968                     tgt_len != sizeof(uint32_t)) {
1969                     goto unimplemented;
1970                 }
1971                 __put_user(*v, t_int);
1972                 break;
1973             }
1974             case IP_RECVERR:
1975             {
1976                 struct errhdr_t {
1977                    struct sock_extended_err ee;
1978                    struct sockaddr_in offender;
1979                 };
1980                 struct errhdr_t *errh = (struct errhdr_t *)data;
1981                 struct errhdr_t *target_errh =
1982                     (struct errhdr_t *)target_data;
1983 
1984                 if (len != sizeof(struct errhdr_t) ||
1985                     tgt_len != sizeof(struct errhdr_t)) {
1986                     goto unimplemented;
1987                 }
1988                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1989                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1990                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1991                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1992                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1993                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1994                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1995                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1996                     (void *) &errh->offender, sizeof(errh->offender));
1997                 break;
1998             }
1999             default:
2000                 goto unimplemented;
2001             }
2002             break;
2003 
2004         case SOL_IPV6:
2005             switch (cmsg->cmsg_type) {
2006             case IPV6_HOPLIMIT:
2007             {
2008                 uint32_t *v = (uint32_t *)data;
2009                 uint32_t *t_int = (uint32_t *)target_data;
2010 
2011                 if (len != sizeof(uint32_t) ||
2012                     tgt_len != sizeof(uint32_t)) {
2013                     goto unimplemented;
2014                 }
2015                 __put_user(*v, t_int);
2016                 break;
2017             }
2018             case IPV6_RECVERR:
2019             {
2020                 struct errhdr6_t {
2021                    struct sock_extended_err ee;
2022                    struct sockaddr_in6 offender;
2023                 };
2024                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2025                 struct errhdr6_t *target_errh =
2026                     (struct errhdr6_t *)target_data;
2027 
2028                 if (len != sizeof(struct errhdr6_t) ||
2029                     tgt_len != sizeof(struct errhdr6_t)) {
2030                     goto unimplemented;
2031                 }
2032                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2033                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2034                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2035                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2036                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2037                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2038                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2039                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2040                     (void *) &errh->offender, sizeof(errh->offender));
2041                 break;
2042             }
2043             default:
2044                 goto unimplemented;
2045             }
2046             break;
2047 
2048         default:
2049         unimplemented:
2050             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2051                           cmsg->cmsg_level, cmsg->cmsg_type);
2052             memcpy(target_data, data, MIN(len, tgt_len));
2053             if (tgt_len > len) {
2054                 memset(target_data + len, 0, tgt_len - len);
2055             }
2056         }
2057 
2058         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2059         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2060         if (msg_controllen < tgt_space) {
2061             tgt_space = msg_controllen;
2062         }
2063         msg_controllen -= tgt_space;
2064         space += tgt_space;
2065         cmsg = CMSG_NXTHDR(msgh, cmsg);
2066         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2067                                          target_cmsg_start);
2068     }
2069     unlock_user(target_cmsg, target_cmsg_addr, space);
2070  the_end:
2071     target_msgh->msg_controllen = tswapal(space);
2072     return 0;
2073 }
2074 
2075 /* do_setsockopt() must return target values and target errnos. */
2076 static abi_long do_setsockopt(int sockfd, int level, int optname,
2077                               abi_ulong optval_addr, socklen_t optlen)
2078 {
2079     abi_long ret;
2080     int val;
2081 
2082     switch(level) {
2083     case SOL_TCP:
2084     case SOL_UDP:
2085         /* TCP and UDP options all take an 'int' value.  */
2086         if (optlen < sizeof(uint32_t))
2087             return -TARGET_EINVAL;
2088 
2089         if (get_user_u32(val, optval_addr))
2090             return -TARGET_EFAULT;
2091         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2092         break;
2093     case SOL_IP:
2094         switch(optname) {
2095         case IP_TOS:
2096         case IP_TTL:
2097         case IP_HDRINCL:
2098         case IP_ROUTER_ALERT:
2099         case IP_RECVOPTS:
2100         case IP_RETOPTS:
2101         case IP_PKTINFO:
2102         case IP_MTU_DISCOVER:
2103         case IP_RECVERR:
2104         case IP_RECVTTL:
2105         case IP_RECVTOS:
2106 #ifdef IP_FREEBIND
2107         case IP_FREEBIND:
2108 #endif
2109         case IP_MULTICAST_TTL:
2110         case IP_MULTICAST_LOOP:
2111             val = 0;
2112             if (optlen >= sizeof(uint32_t)) {
2113                 if (get_user_u32(val, optval_addr))
2114                     return -TARGET_EFAULT;
2115             } else if (optlen >= 1) {
2116                 if (get_user_u8(val, optval_addr))
2117                     return -TARGET_EFAULT;
2118             }
2119             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2120             break;
2121         case IP_ADD_MEMBERSHIP:
2122         case IP_DROP_MEMBERSHIP:
2123         {
2124             struct ip_mreqn ip_mreq;
2125             struct target_ip_mreqn *target_smreqn;
2126 
2127             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2128                               sizeof(struct target_ip_mreq));
2129 
2130             if (optlen < sizeof (struct target_ip_mreq) ||
2131                 optlen > sizeof (struct target_ip_mreqn)) {
2132                 return -TARGET_EINVAL;
2133             }
2134 
2135             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2136             if (!target_smreqn) {
2137                 return -TARGET_EFAULT;
2138             }
2139             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2140             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2141             if (optlen == sizeof(struct target_ip_mreqn)) {
2142                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2143                 optlen = sizeof(struct ip_mreqn);
2144             }
2145             unlock_user(target_smreqn, optval_addr, 0);
2146 
2147             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2148             break;
2149         }
2150         case IP_BLOCK_SOURCE:
2151         case IP_UNBLOCK_SOURCE:
2152         case IP_ADD_SOURCE_MEMBERSHIP:
2153         case IP_DROP_SOURCE_MEMBERSHIP:
2154         {
2155             struct ip_mreq_source *ip_mreq_source;
2156 
2157             if (optlen != sizeof (struct target_ip_mreq_source))
2158                 return -TARGET_EINVAL;
2159 
2160             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2161             if (!ip_mreq_source) {
2162                 return -TARGET_EFAULT;
2163             }
2164             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2165             unlock_user (ip_mreq_source, optval_addr, 0);
2166             break;
2167         }
2168         default:
2169             goto unimplemented;
2170         }
2171         break;
2172     case SOL_IPV6:
2173         switch (optname) {
2174         case IPV6_MTU_DISCOVER:
2175         case IPV6_MTU:
2176         case IPV6_V6ONLY:
2177         case IPV6_RECVPKTINFO:
2178         case IPV6_UNICAST_HOPS:
2179         case IPV6_MULTICAST_HOPS:
2180         case IPV6_MULTICAST_LOOP:
2181         case IPV6_RECVERR:
2182         case IPV6_RECVHOPLIMIT:
2183         case IPV6_2292HOPLIMIT:
2184         case IPV6_CHECKSUM:
2185         case IPV6_ADDRFORM:
2186         case IPV6_2292PKTINFO:
2187         case IPV6_RECVTCLASS:
2188         case IPV6_RECVRTHDR:
2189         case IPV6_2292RTHDR:
2190         case IPV6_RECVHOPOPTS:
2191         case IPV6_2292HOPOPTS:
2192         case IPV6_RECVDSTOPTS:
2193         case IPV6_2292DSTOPTS:
2194         case IPV6_TCLASS:
2195         case IPV6_ADDR_PREFERENCES:
2196 #ifdef IPV6_RECVPATHMTU
2197         case IPV6_RECVPATHMTU:
2198 #endif
2199 #ifdef IPV6_TRANSPARENT
2200         case IPV6_TRANSPARENT:
2201 #endif
2202 #ifdef IPV6_FREEBIND
2203         case IPV6_FREEBIND:
2204 #endif
2205 #ifdef IPV6_RECVORIGDSTADDR
2206         case IPV6_RECVORIGDSTADDR:
2207 #endif
2208             val = 0;
2209             if (optlen < sizeof(uint32_t)) {
2210                 return -TARGET_EINVAL;
2211             }
2212             if (get_user_u32(val, optval_addr)) {
2213                 return -TARGET_EFAULT;
2214             }
2215             ret = get_errno(setsockopt(sockfd, level, optname,
2216                                        &val, sizeof(val)));
2217             break;
2218         case IPV6_PKTINFO:
2219         {
2220             struct in6_pktinfo pki;
2221 
2222             if (optlen < sizeof(pki)) {
2223                 return -TARGET_EINVAL;
2224             }
2225 
2226             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2227                 return -TARGET_EFAULT;
2228             }
2229 
2230             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2231 
2232             ret = get_errno(setsockopt(sockfd, level, optname,
2233                                        &pki, sizeof(pki)));
2234             break;
2235         }
2236         case IPV6_ADD_MEMBERSHIP:
2237         case IPV6_DROP_MEMBERSHIP:
2238         {
2239             struct ipv6_mreq ipv6mreq;
2240 
2241             if (optlen < sizeof(ipv6mreq)) {
2242                 return -TARGET_EINVAL;
2243             }
2244 
2245             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2246                 return -TARGET_EFAULT;
2247             }
2248 
2249             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2250 
2251             ret = get_errno(setsockopt(sockfd, level, optname,
2252                                        &ipv6mreq, sizeof(ipv6mreq)));
2253             break;
2254         }
2255         default:
2256             goto unimplemented;
2257         }
2258         break;
2259     case SOL_ICMPV6:
2260         switch (optname) {
2261         case ICMPV6_FILTER:
2262         {
2263             struct icmp6_filter icmp6f;
2264 
2265             if (optlen > sizeof(icmp6f)) {
2266                 optlen = sizeof(icmp6f);
2267             }
2268 
2269             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2270                 return -TARGET_EFAULT;
2271             }
2272 
2273             for (val = 0; val < 8; val++) {
2274                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2275             }
2276 
2277             ret = get_errno(setsockopt(sockfd, level, optname,
2278                                        &icmp6f, optlen));
2279             break;
2280         }
2281         default:
2282             goto unimplemented;
2283         }
2284         break;
2285     case SOL_RAW:
2286         switch (optname) {
2287         case ICMP_FILTER:
2288         case IPV6_CHECKSUM:
2289             /* these take a u32 value */
2290             if (optlen < sizeof(uint32_t)) {
2291                 return -TARGET_EINVAL;
2292             }
2293 
2294             if (get_user_u32(val, optval_addr)) {
2295                 return -TARGET_EFAULT;
2296             }
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        &val, sizeof(val)));
2299             break;
2300 
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2306     case SOL_ALG:
2307         switch (optname) {
2308         case ALG_SET_KEY:
2309         {
2310             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2311             if (!alg_key) {
2312                 return -TARGET_EFAULT;
2313             }
2314             ret = get_errno(setsockopt(sockfd, level, optname,
2315                                        alg_key, optlen));
2316             unlock_user(alg_key, optval_addr, optlen);
2317             break;
2318         }
2319         case ALG_SET_AEAD_AUTHSIZE:
2320         {
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        NULL, optlen));
2323             break;
2324         }
2325         default:
2326             goto unimplemented;
2327         }
2328         break;
2329 #endif
2330     case TARGET_SOL_SOCKET:
2331         switch (optname) {
2332         case TARGET_SO_RCVTIMEO:
2333         case TARGET_SO_SNDTIMEO:
2334         {
2335                 struct timeval tv;
2336 
2337                 if (optlen != sizeof(struct target_timeval)) {
2338                     return -TARGET_EINVAL;
2339                 }
2340 
2341                 if (copy_from_user_timeval(&tv, optval_addr)) {
2342                     return -TARGET_EFAULT;
2343                 }
2344 
2345                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2346                                 optname == TARGET_SO_RCVTIMEO ?
2347                                     SO_RCVTIMEO : SO_SNDTIMEO,
2348                                 &tv, sizeof(tv)));
2349                 return ret;
2350         }
2351         case TARGET_SO_ATTACH_FILTER:
2352         {
2353                 struct target_sock_fprog *tfprog;
2354                 struct target_sock_filter *tfilter;
2355                 struct sock_fprog fprog;
2356                 struct sock_filter *filter;
2357                 int i;
2358 
2359                 if (optlen != sizeof(*tfprog)) {
2360                     return -TARGET_EINVAL;
2361                 }
2362                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2363                     return -TARGET_EFAULT;
2364                 }
2365                 if (!lock_user_struct(VERIFY_READ, tfilter,
2366                                       tswapal(tfprog->filter), 0)) {
2367                     unlock_user_struct(tfprog, optval_addr, 1);
2368                     return -TARGET_EFAULT;
2369                 }
2370 
2371                 fprog.len = tswap16(tfprog->len);
2372                 filter = g_try_new(struct sock_filter, fprog.len);
2373                 if (filter == NULL) {
2374                     unlock_user_struct(tfilter, tfprog->filter, 1);
2375                     unlock_user_struct(tfprog, optval_addr, 1);
2376                     return -TARGET_ENOMEM;
2377                 }
2378                 for (i = 0; i < fprog.len; i++) {
2379                     filter[i].code = tswap16(tfilter[i].code);
2380                     filter[i].jt = tfilter[i].jt;
2381                     filter[i].jf = tfilter[i].jf;
2382                     filter[i].k = tswap32(tfilter[i].k);
2383                 }
2384                 fprog.filter = filter;
2385 
2386                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2387                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2388                 g_free(filter);
2389 
2390                 unlock_user_struct(tfilter, tfprog->filter, 1);
2391                 unlock_user_struct(tfprog, optval_addr, 1);
2392                 return ret;
2393         }
2394         case TARGET_SO_BINDTODEVICE:
2395         {
2396                 char *dev_ifname, *addr_ifname;
2397 
2398                 if (optlen > IFNAMSIZ - 1) {
2399                     optlen = IFNAMSIZ - 1;
2400                 }
2401                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2402                 if (!dev_ifname) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 optname = SO_BINDTODEVICE;
2406                 addr_ifname = alloca(IFNAMSIZ);
2407                 memcpy(addr_ifname, dev_ifname, optlen);
2408                 addr_ifname[optlen] = 0;
2409                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2410                                            addr_ifname, optlen));
2411                 unlock_user(dev_ifname, optval_addr, 0);
2412                 return ret;
2413         }
2414         case TARGET_SO_LINGER:
2415         {
2416                 struct linger lg;
2417                 struct target_linger *tlg;
2418 
2419                 if (optlen != sizeof(struct target_linger)) {
2420                     return -TARGET_EINVAL;
2421                 }
2422                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2423                     return -TARGET_EFAULT;
2424                 }
2425                 __get_user(lg.l_onoff, &tlg->l_onoff);
2426                 __get_user(lg.l_linger, &tlg->l_linger);
2427                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2428                                 &lg, sizeof(lg)));
2429                 unlock_user_struct(tlg, optval_addr, 0);
2430                 return ret;
2431         }
2432         /* Options with 'int' argument.  */
2433         case TARGET_SO_DEBUG:
2434                 optname = SO_DEBUG;
2435                 break;
2436         case TARGET_SO_REUSEADDR:
2437                 optname = SO_REUSEADDR;
2438                 break;
2439 #ifdef SO_REUSEPORT
2440         case TARGET_SO_REUSEPORT:
2441                 optname = SO_REUSEPORT;
2442                 break;
2443 #endif
2444         case TARGET_SO_TYPE:
2445                 optname = SO_TYPE;
2446                 break;
2447         case TARGET_SO_ERROR:
2448                 optname = SO_ERROR;
2449                 break;
2450         case TARGET_SO_DONTROUTE:
2451                 optname = SO_DONTROUTE;
2452                 break;
2453         case TARGET_SO_BROADCAST:
2454                 optname = SO_BROADCAST;
2455                 break;
2456         case TARGET_SO_SNDBUF:
2457                 optname = SO_SNDBUF;
2458                 break;
2459         case TARGET_SO_SNDBUFFORCE:
2460                 optname = SO_SNDBUFFORCE;
2461                 break;
2462         case TARGET_SO_RCVBUF:
2463                 optname = SO_RCVBUF;
2464                 break;
2465         case TARGET_SO_RCVBUFFORCE:
2466                 optname = SO_RCVBUFFORCE;
2467                 break;
2468         case TARGET_SO_KEEPALIVE:
2469                 optname = SO_KEEPALIVE;
2470                 break;
2471         case TARGET_SO_OOBINLINE:
2472                 optname = SO_OOBINLINE;
2473                 break;
2474         case TARGET_SO_NO_CHECK:
2475                 optname = SO_NO_CHECK;
2476                 break;
2477         case TARGET_SO_PRIORITY:
2478                 optname = SO_PRIORITY;
2479                 break;
2480 #ifdef SO_BSDCOMPAT
2481         case TARGET_SO_BSDCOMPAT:
2482                 optname = SO_BSDCOMPAT;
2483                 break;
2484 #endif
2485         case TARGET_SO_PASSCRED:
2486                 optname = SO_PASSCRED;
2487                 break;
2488         case TARGET_SO_PASSSEC:
2489                 optname = SO_PASSSEC;
2490                 break;
2491         case TARGET_SO_TIMESTAMP:
2492                 optname = SO_TIMESTAMP;
2493                 break;
2494         case TARGET_SO_RCVLOWAT:
2495                 optname = SO_RCVLOWAT;
2496                 break;
2497         default:
2498             goto unimplemented;
2499         }
2500         if (optlen < sizeof(uint32_t))
2501             return -TARGET_EINVAL;
2502 
2503         if (get_user_u32(val, optval_addr))
2504             return -TARGET_EFAULT;
2505         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2506         break;
2507 #ifdef SOL_NETLINK
2508     case SOL_NETLINK:
2509         switch (optname) {
2510         case NETLINK_PKTINFO:
2511         case NETLINK_ADD_MEMBERSHIP:
2512         case NETLINK_DROP_MEMBERSHIP:
2513         case NETLINK_BROADCAST_ERROR:
2514         case NETLINK_NO_ENOBUFS:
2515 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2516         case NETLINK_LISTEN_ALL_NSID:
2517         case NETLINK_CAP_ACK:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2520         case NETLINK_EXT_ACK:
2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2523         case NETLINK_GET_STRICT_CHK:
2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2525             break;
2526         default:
2527             goto unimplemented;
2528         }
2529         val = 0;
2530         if (optlen < sizeof(uint32_t)) {
2531             return -TARGET_EINVAL;
2532         }
2533         if (get_user_u32(val, optval_addr)) {
2534             return -TARGET_EFAULT;
2535         }
2536         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2537                                    sizeof(val)));
2538         break;
2539 #endif /* SOL_NETLINK */
2540     default:
2541     unimplemented:
2542         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2543                       level, optname);
2544         ret = -TARGET_ENOPROTOOPT;
2545     }
2546     return ret;
2547 }
2548 
2549 /* do_getsockopt() must return target values and target errnos. */
2550 static abi_long do_getsockopt(int sockfd, int level, int optname,
2551                               abi_ulong optval_addr, abi_ulong optlen)
2552 {
2553     abi_long ret;
2554     int len, val;
2555     socklen_t lv;
2556 
2557     switch(level) {
2558     case TARGET_SOL_SOCKET:
2559         level = SOL_SOCKET;
2560         switch (optname) {
2561         /* These don't just return a single integer */
2562         case TARGET_SO_PEERNAME:
2563             goto unimplemented;
2564         case TARGET_SO_RCVTIMEO: {
2565             struct timeval tv;
2566             socklen_t tvlen;
2567 
2568             optname = SO_RCVTIMEO;
2569 
2570 get_timeout:
2571             if (get_user_u32(len, optlen)) {
2572                 return -TARGET_EFAULT;
2573             }
2574             if (len < 0) {
2575                 return -TARGET_EINVAL;
2576             }
2577 
2578             tvlen = sizeof(tv);
2579             ret = get_errno(getsockopt(sockfd, level, optname,
2580                                        &tv, &tvlen));
2581             if (ret < 0) {
2582                 return ret;
2583             }
2584             if (len > sizeof(struct target_timeval)) {
2585                 len = sizeof(struct target_timeval);
2586             }
2587             if (copy_to_user_timeval(optval_addr, &tv)) {
2588                 return -TARGET_EFAULT;
2589             }
2590             if (put_user_u32(len, optlen)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             break;
2594         }
2595         case TARGET_SO_SNDTIMEO:
2596             optname = SO_SNDTIMEO;
2597             goto get_timeout;
2598         case TARGET_SO_PEERCRED: {
2599             struct ucred cr;
2600             socklen_t crlen;
2601             struct target_ucred *tcr;
2602 
2603             if (get_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             if (len < 0) {
2607                 return -TARGET_EINVAL;
2608             }
2609 
2610             crlen = sizeof(cr);
2611             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2612                                        &cr, &crlen));
2613             if (ret < 0) {
2614                 return ret;
2615             }
2616             if (len > crlen) {
2617                 len = crlen;
2618             }
2619             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             __put_user(cr.pid, &tcr->pid);
2623             __put_user(cr.uid, &tcr->uid);
2624             __put_user(cr.gid, &tcr->gid);
2625             unlock_user_struct(tcr, optval_addr, 1);
2626             if (put_user_u32(len, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631         case TARGET_SO_PEERSEC: {
2632             char *name;
2633 
2634             if (get_user_u32(len, optlen)) {
2635                 return -TARGET_EFAULT;
2636             }
2637             if (len < 0) {
2638                 return -TARGET_EINVAL;
2639             }
2640             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2641             if (!name) {
2642                 return -TARGET_EFAULT;
2643             }
2644             lv = len;
2645             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2646                                        name, &lv));
2647             if (put_user_u32(lv, optlen)) {
2648                 ret = -TARGET_EFAULT;
2649             }
2650             unlock_user(name, optval_addr, lv);
2651             break;
2652         }
2653         case TARGET_SO_LINGER:
2654         {
2655             struct linger lg;
2656             socklen_t lglen;
2657             struct target_linger *tlg;
2658 
2659             if (get_user_u32(len, optlen)) {
2660                 return -TARGET_EFAULT;
2661             }
2662             if (len < 0) {
2663                 return -TARGET_EINVAL;
2664             }
2665 
2666             lglen = sizeof(lg);
2667             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2668                                        &lg, &lglen));
2669             if (ret < 0) {
2670                 return ret;
2671             }
2672             if (len > lglen) {
2673                 len = lglen;
2674             }
2675             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             __put_user(lg.l_onoff, &tlg->l_onoff);
2679             __put_user(lg.l_linger, &tlg->l_linger);
2680             unlock_user_struct(tlg, optval_addr, 1);
2681             if (put_user_u32(len, optlen)) {
2682                 return -TARGET_EFAULT;
2683             }
2684             break;
2685         }
2686         /* Options with 'int' argument.  */
2687         case TARGET_SO_DEBUG:
2688             optname = SO_DEBUG;
2689             goto int_case;
2690         case TARGET_SO_REUSEADDR:
2691             optname = SO_REUSEADDR;
2692             goto int_case;
2693 #ifdef SO_REUSEPORT
2694         case TARGET_SO_REUSEPORT:
2695             optname = SO_REUSEPORT;
2696             goto int_case;
2697 #endif
2698         case TARGET_SO_TYPE:
2699             optname = SO_TYPE;
2700             goto int_case;
2701         case TARGET_SO_ERROR:
2702             optname = SO_ERROR;
2703             goto int_case;
2704         case TARGET_SO_DONTROUTE:
2705             optname = SO_DONTROUTE;
2706             goto int_case;
2707         case TARGET_SO_BROADCAST:
2708             optname = SO_BROADCAST;
2709             goto int_case;
2710         case TARGET_SO_SNDBUF:
2711             optname = SO_SNDBUF;
2712             goto int_case;
2713         case TARGET_SO_RCVBUF:
2714             optname = SO_RCVBUF;
2715             goto int_case;
2716         case TARGET_SO_KEEPALIVE:
2717             optname = SO_KEEPALIVE;
2718             goto int_case;
2719         case TARGET_SO_OOBINLINE:
2720             optname = SO_OOBINLINE;
2721             goto int_case;
2722         case TARGET_SO_NO_CHECK:
2723             optname = SO_NO_CHECK;
2724             goto int_case;
2725         case TARGET_SO_PRIORITY:
2726             optname = SO_PRIORITY;
2727             goto int_case;
2728 #ifdef SO_BSDCOMPAT
2729         case TARGET_SO_BSDCOMPAT:
2730             optname = SO_BSDCOMPAT;
2731             goto int_case;
2732 #endif
2733         case TARGET_SO_PASSCRED:
2734             optname = SO_PASSCRED;
2735             goto int_case;
2736         case TARGET_SO_TIMESTAMP:
2737             optname = SO_TIMESTAMP;
2738             goto int_case;
2739         case TARGET_SO_RCVLOWAT:
2740             optname = SO_RCVLOWAT;
2741             goto int_case;
2742         case TARGET_SO_ACCEPTCONN:
2743             optname = SO_ACCEPTCONN;
2744             goto int_case;
2745         case TARGET_SO_PROTOCOL:
2746             optname = SO_PROTOCOL;
2747             goto int_case;
2748         case TARGET_SO_DOMAIN:
2749             optname = SO_DOMAIN;
2750             goto int_case;
2751         default:
2752             goto int_case;
2753         }
2754         break;
2755     case SOL_TCP:
2756     case SOL_UDP:
2757         /* TCP and UDP options all take an 'int' value.  */
2758     int_case:
2759         if (get_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         if (len < 0)
2762             return -TARGET_EINVAL;
2763         lv = sizeof(lv);
2764         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2765         if (ret < 0)
2766             return ret;
2767         switch (optname) {
2768         case SO_TYPE:
2769             val = host_to_target_sock_type(val);
2770             break;
2771         case SO_ERROR:
2772             val = host_to_target_errno(val);
2773             break;
2774         }
2775         if (len > lv)
2776             len = lv;
2777         if (len == 4) {
2778             if (put_user_u32(val, optval_addr))
2779                 return -TARGET_EFAULT;
2780         } else {
2781             if (put_user_u8(val, optval_addr))
2782                 return -TARGET_EFAULT;
2783         }
2784         if (put_user_u32(len, optlen))
2785             return -TARGET_EFAULT;
2786         break;
2787     case SOL_IP:
2788         switch(optname) {
2789         case IP_TOS:
2790         case IP_TTL:
2791         case IP_HDRINCL:
2792         case IP_ROUTER_ALERT:
2793         case IP_RECVOPTS:
2794         case IP_RETOPTS:
2795         case IP_PKTINFO:
2796         case IP_MTU_DISCOVER:
2797         case IP_RECVERR:
2798         case IP_RECVTOS:
2799 #ifdef IP_FREEBIND
2800         case IP_FREEBIND:
2801 #endif
2802         case IP_MULTICAST_TTL:
2803         case IP_MULTICAST_LOOP:
2804             if (get_user_u32(len, optlen))
2805                 return -TARGET_EFAULT;
2806             if (len < 0)
2807                 return -TARGET_EINVAL;
2808             lv = sizeof(lv);
2809             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2810             if (ret < 0)
2811                 return ret;
2812             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2813                 len = 1;
2814                 if (put_user_u32(len, optlen)
2815                     || put_user_u8(val, optval_addr))
2816                     return -TARGET_EFAULT;
2817             } else {
2818                 if (len > sizeof(int))
2819                     len = sizeof(int);
2820                 if (put_user_u32(len, optlen)
2821                     || put_user_u32(val, optval_addr))
2822                     return -TARGET_EFAULT;
2823             }
2824             break;
2825         default:
2826             ret = -TARGET_ENOPROTOOPT;
2827             break;
2828         }
2829         break;
2830     case SOL_IPV6:
2831         switch (optname) {
2832         case IPV6_MTU_DISCOVER:
2833         case IPV6_MTU:
2834         case IPV6_V6ONLY:
2835         case IPV6_RECVPKTINFO:
2836         case IPV6_UNICAST_HOPS:
2837         case IPV6_MULTICAST_HOPS:
2838         case IPV6_MULTICAST_LOOP:
2839         case IPV6_RECVERR:
2840         case IPV6_RECVHOPLIMIT:
2841         case IPV6_2292HOPLIMIT:
2842         case IPV6_CHECKSUM:
2843         case IPV6_ADDRFORM:
2844         case IPV6_2292PKTINFO:
2845         case IPV6_RECVTCLASS:
2846         case IPV6_RECVRTHDR:
2847         case IPV6_2292RTHDR:
2848         case IPV6_RECVHOPOPTS:
2849         case IPV6_2292HOPOPTS:
2850         case IPV6_RECVDSTOPTS:
2851         case IPV6_2292DSTOPTS:
2852         case IPV6_TCLASS:
2853         case IPV6_ADDR_PREFERENCES:
2854 #ifdef IPV6_RECVPATHMTU
2855         case IPV6_RECVPATHMTU:
2856 #endif
2857 #ifdef IPV6_TRANSPARENT
2858         case IPV6_TRANSPARENT:
2859 #endif
2860 #ifdef IPV6_FREEBIND
2861         case IPV6_FREEBIND:
2862 #endif
2863 #ifdef IPV6_RECVORIGDSTADDR
2864         case IPV6_RECVORIGDSTADDR:
2865 #endif
2866             if (get_user_u32(len, optlen))
2867                 return -TARGET_EFAULT;
2868             if (len < 0)
2869                 return -TARGET_EINVAL;
2870             lv = sizeof(lv);
2871             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2872             if (ret < 0)
2873                 return ret;
2874             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2875                 len = 1;
2876                 if (put_user_u32(len, optlen)
2877                     || put_user_u8(val, optval_addr))
2878                     return -TARGET_EFAULT;
2879             } else {
2880                 if (len > sizeof(int))
2881                     len = sizeof(int);
2882                 if (put_user_u32(len, optlen)
2883                     || put_user_u32(val, optval_addr))
2884                     return -TARGET_EFAULT;
2885             }
2886             break;
2887         default:
2888             ret = -TARGET_ENOPROTOOPT;
2889             break;
2890         }
2891         break;
2892 #ifdef SOL_NETLINK
2893     case SOL_NETLINK:
2894         switch (optname) {
2895         case NETLINK_PKTINFO:
2896         case NETLINK_BROADCAST_ERROR:
2897         case NETLINK_NO_ENOBUFS:
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899         case NETLINK_LISTEN_ALL_NSID:
2900         case NETLINK_CAP_ACK:
2901 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2902 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2903         case NETLINK_EXT_ACK:
2904 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2905 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2906         case NETLINK_GET_STRICT_CHK:
2907 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2908             if (get_user_u32(len, optlen)) {
2909                 return -TARGET_EFAULT;
2910             }
2911             if (len != sizeof(val)) {
2912                 return -TARGET_EINVAL;
2913             }
2914             lv = len;
2915             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2916             if (ret < 0) {
2917                 return ret;
2918             }
2919             if (put_user_u32(lv, optlen)
2920                 || put_user_u32(val, optval_addr)) {
2921                 return -TARGET_EFAULT;
2922             }
2923             break;
2924 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2925         case NETLINK_LIST_MEMBERSHIPS:
2926         {
2927             uint32_t *results;
2928             int i;
2929             if (get_user_u32(len, optlen)) {
2930                 return -TARGET_EFAULT;
2931             }
2932             if (len < 0) {
2933                 return -TARGET_EINVAL;
2934             }
2935             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2936             if (!results && len > 0) {
2937                 return -TARGET_EFAULT;
2938             }
2939             lv = len;
2940             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2941             if (ret < 0) {
2942                 unlock_user(results, optval_addr, 0);
2943                 return ret;
2944             }
2945             /* Swap from host endianness to target endianness. */
2946             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2947                 results[i] = tswap32(results[i]);
2948             }
2949             if (put_user_u32(lv, optlen)) {
2950                 return -TARGET_EFAULT;
2951             }
2952             unlock_user(results, optval_addr, 0);
2953             break;
2954         }
2955 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2956         default:
2957             goto unimplemented;
2958         }
2959         break;
2960 #endif /* SOL_NETLINK */
2961     default:
2962     unimplemented:
2963         qemu_log_mask(LOG_UNIMP,
2964                       "getsockopt level=%d optname=%d not yet supported\n",
2965                       level, optname);
2966         ret = -TARGET_EOPNOTSUPP;
2967         break;
2968     }
2969     return ret;
2970 }
2971 
2972 /* Convert target low/high pair representing file offset into the host
2973  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2974  * as the kernel doesn't handle them either.
2975  */
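     /* A worked example (assuming a 32-bit target and a 64-bit host):
      * tlow = 0x89abcdef and thigh = 0x01234567 combine into the 64-bit
      * offset 0x0123456789abcdef, which then fits entirely in *hlow with
      * *hhigh == 0. The shift is done in two halves so that we never shift
      * by a full word width, which would be undefined behaviour in C.
      */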
2976 static void target_to_host_low_high(abi_ulong tlow,
2977                                     abi_ulong thigh,
2978                                     unsigned long *hlow,
2979                                     unsigned long *hhigh)
2980 {
2981     uint64_t off = tlow |
2982         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2983         TARGET_LONG_BITS / 2;
2984 
2985     *hlow = off;
2986     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2987 }
2988 
2989 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2990                                 abi_ulong count, int copy)
2991 {
2992     struct target_iovec *target_vec;
2993     struct iovec *vec;
2994     abi_ulong total_len, max_len;
2995     int i;
2996     int err = 0;
2997     bool bad_address = false;
2998 
2999     if (count == 0) {
3000         errno = 0;
3001         return NULL;
3002     }
3003     if (count > IOV_MAX) {
3004         errno = EINVAL;
3005         return NULL;
3006     }
3007 
3008     vec = g_try_new0(struct iovec, count);
3009     if (vec == NULL) {
3010         errno = ENOMEM;
3011         return NULL;
3012     }
3013 
3014     target_vec = lock_user(VERIFY_READ, target_addr,
3015                            count * sizeof(struct target_iovec), 1);
3016     if (target_vec == NULL) {
3017         err = EFAULT;
3018         goto fail2;
3019     }
3020 
3021     /* ??? If host page size > target page size, this will result in a
3022        value larger than what we can actually support.  */
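     /* With 4 KiB target pages this caps a single request at 0x7ffff000
      * bytes, mirroring the kernel's own MAX_RW_COUNT limit.
      */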
3023     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3024     total_len = 0;
3025 
3026     for (i = 0; i < count; i++) {
3027         abi_ulong base = tswapal(target_vec[i].iov_base);
3028         abi_long len = tswapal(target_vec[i].iov_len);
3029 
3030         if (len < 0) {
3031             err = EINVAL;
3032             goto fail;
3033         } else if (len == 0) {
3034             /* Zero length pointer is ignored.  */
3035             vec[i].iov_base = 0;
3036         } else {
3037             vec[i].iov_base = lock_user(type, base, len, copy);
3038             /* If the first buffer pointer is bad, this is a fault.  But
3039              * subsequent bad buffers will result in a partial write; this
3040              * is realized by filling the vector with null pointers and
3041              * zero lengths. */
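             /* For example, a writev() whose second iovec points at an
              * unmapped page still transfers the first buffer and returns a
              * short byte count, matching what Linux itself would do.
              */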
3042             if (!vec[i].iov_base) {
3043                 if (i == 0) {
3044                     err = EFAULT;
3045                     goto fail;
3046                 } else {
3047                     bad_address = true;
3048                 }
3049             }
3050             if (bad_address) {
3051                 len = 0;
3052             }
3053             if (len > max_len - total_len) {
3054                 len = max_len - total_len;
3055             }
3056         }
3057         vec[i].iov_len = len;
3058         total_len += len;
3059     }
3060 
3061     unlock_user(target_vec, target_addr, 0);
3062     return vec;
3063 
3064  fail:
3065     while (--i >= 0) {
3066         if (tswapal(target_vec[i].iov_len) > 0) {
3067             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3068         }
3069     }
3070     unlock_user(target_vec, target_addr, 0);
3071  fail2:
3072     g_free(vec);
3073     errno = err;
3074     return NULL;
3075 }
3076 
3077 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3078                          abi_ulong count, int copy)
3079 {
3080     struct target_iovec *target_vec;
3081     int i;
3082 
3083     target_vec = lock_user(VERIFY_READ, target_addr,
3084                            count * sizeof(struct target_iovec), 1);
3085     if (target_vec) {
3086         for (i = 0; i < count; i++) {
3087             abi_ulong base = tswapal(target_vec[i].iov_base);
3088             abi_long len = tswapal(target_vec[i].iov_len);
3089             if (len < 0) {
3090                 break;
3091             }
3092             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3093         }
3094         unlock_user(target_vec, target_addr, 0);
3095     }
3096 
3097     g_free(vec);
3098 }
3099 
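/*
 * Convert target SOCK_* type and flag bits to their host equivalents.
 * For example, a target SOCK_STREAM | SOCK_NONBLOCK request maps directly
 * when the host defines SOCK_NONBLOCK; otherwise the flag is left for
 * sock_flags_fixup() below to emulate via fcntl(O_NONBLOCK), and hosts
 * providing neither get -TARGET_EINVAL.
 */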
3100 static inline int target_to_host_sock_type(int *type)
3101 {
3102     int host_type = 0;
3103     int target_type = *type;
3104 
3105     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3106     case TARGET_SOCK_DGRAM:
3107         host_type = SOCK_DGRAM;
3108         break;
3109     case TARGET_SOCK_STREAM:
3110         host_type = SOCK_STREAM;
3111         break;
3112     default:
3113         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3114         break;
3115     }
3116     if (target_type & TARGET_SOCK_CLOEXEC) {
3117 #if defined(SOCK_CLOEXEC)
3118         host_type |= SOCK_CLOEXEC;
3119 #else
3120         return -TARGET_EINVAL;
3121 #endif
3122     }
3123     if (target_type & TARGET_SOCK_NONBLOCK) {
3124 #if defined(SOCK_NONBLOCK)
3125         host_type |= SOCK_NONBLOCK;
3126 #elif !defined(O_NONBLOCK)
3127         return -TARGET_EINVAL;
3128 #endif
3129     }
3130     *type = host_type;
3131     return 0;
3132 }
3133 
3134 /* Try to emulate socket type flags after socket creation.  */
3135 static int sock_flags_fixup(int fd, int target_type)
3136 {
3137 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3138     if (target_type & TARGET_SOCK_NONBLOCK) {
3139         int flags = fcntl(fd, F_GETFL);
3140         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3141             close(fd);
3142             return -TARGET_EINVAL;
3143         }
3144     }
3145 #endif
3146     return fd;
3147 }
3148 
3149 /* do_socket() Must return target values and target errnos. */
3150 static abi_long do_socket(int domain, int type, int protocol)
3151 {
3152     int target_type = type;
3153     int ret;
3154 
3155     ret = target_to_host_sock_type(&type);
3156     if (ret) {
3157         return ret;
3158     }
3159 
3160     if (domain == PF_NETLINK && !(
3161 #ifdef CONFIG_RTNETLINK
3162          protocol == NETLINK_ROUTE ||
3163 #endif
3164          protocol == NETLINK_KOBJECT_UEVENT ||
3165          protocol == NETLINK_AUDIT)) {
3166         return -TARGET_EPROTONOSUPPORT;
3167     }
3168 
3169     if (domain == AF_PACKET ||
3170         (domain == AF_INET && type == SOCK_PACKET)) {
3171         protocol = tswap16(protocol);
3172     }
3173 
3174     ret = get_errno(socket(domain, type, protocol));
3175     if (ret >= 0) {
3176         ret = sock_flags_fixup(ret, target_type);
3177         if (type == SOCK_PACKET) {
3178             /* Handle an obsolete case:
3179              * if socket type is SOCK_PACKET, bind by name
3180              */
3181             fd_trans_register(ret, &target_packet_trans);
3182         } else if (domain == PF_NETLINK) {
3183             switch (protocol) {
3184 #ifdef CONFIG_RTNETLINK
3185             case NETLINK_ROUTE:
3186                 fd_trans_register(ret, &target_netlink_route_trans);
3187                 break;
3188 #endif
3189             case NETLINK_KOBJECT_UEVENT:
3190                 /* nothing to do: messages are strings */
3191                 break;
3192             case NETLINK_AUDIT:
3193                 fd_trans_register(ret, &target_netlink_audit_trans);
3194                 break;
3195             default:
3196                 g_assert_not_reached();
3197             }
3198         }
3199     }
3200     return ret;
3201 }
3202 
3203 /* do_bind() Must return target values and target errnos. */
3204 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3205                         socklen_t addrlen)
3206 {
3207     void *addr;
3208     abi_long ret;
3209 
3210     if ((int)addrlen < 0) {
3211         return -TARGET_EINVAL;
3212     }
3213 
3214     addr = alloca(addrlen+1);
3215 
3216     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3217     if (ret)
3218         return ret;
3219 
3220     return get_errno(bind(sockfd, addr, addrlen));
3221 }
3222 
3223 /* do_connect() Must return target values and target errnos. */
3224 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3225                            socklen_t addrlen)
3226 {
3227     void *addr;
3228     abi_long ret;
3229 
3230     if ((int)addrlen < 0) {
3231         return -TARGET_EINVAL;
3232     }
3233 
3234     addr = alloca(addrlen+1);
3235 
3236     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3237     if (ret)
3238         return ret;
3239 
3240     return get_errno(safe_connect(sockfd, addr, addrlen));
3241 }
3242 
3243 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
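/* Note: the control buffer is allocated at twice the target msg_controllen
 * to leave headroom for host cmsg headers, which can be larger than the
 * target's; the payload itself is converted by target_to_host_cmsg() and
 * host_to_target_cmsg().  A faulting msg_name is forwarded as (void *)-1 so
 * the host kernel decides whether the address matters for this socket.
 */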
3244 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3245                                       int flags, int send)
3246 {
3247     abi_long ret, len;
3248     struct msghdr msg;
3249     abi_ulong count;
3250     struct iovec *vec;
3251     abi_ulong target_vec;
3252 
3253     if (msgp->msg_name) {
3254         msg.msg_namelen = tswap32(msgp->msg_namelen);
3255         msg.msg_name = alloca(msg.msg_namelen+1);
3256         ret = target_to_host_sockaddr(fd, msg.msg_name,
3257                                       tswapal(msgp->msg_name),
3258                                       msg.msg_namelen);
3259         if (ret == -TARGET_EFAULT) {
3260             /* For connected sockets msg_name and msg_namelen must
3261              * be ignored, so returning EFAULT immediately is wrong.
3262              * Instead, pass a bad msg_name to the host kernel, and
3263              * let it decide whether to return EFAULT or not.
3264              */
3265             msg.msg_name = (void *)-1;
3266         } else if (ret) {
3267             goto out2;
3268         }
3269     } else {
3270         msg.msg_name = NULL;
3271         msg.msg_namelen = 0;
3272     }
3273     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3274     msg.msg_control = alloca(msg.msg_controllen);
3275     memset(msg.msg_control, 0, msg.msg_controllen);
3276 
3277     msg.msg_flags = tswap32(msgp->msg_flags);
3278 
3279     count = tswapal(msgp->msg_iovlen);
3280     target_vec = tswapal(msgp->msg_iov);
3281 
3282     if (count > IOV_MAX) {
3283         /* sendmsg/recvmsg return a different errno for this condition than
3284          * readv/writev, so we must catch it here before lock_iovec() does.
3285          */
3286         ret = -TARGET_EMSGSIZE;
3287         goto out2;
3288     }
3289 
3290     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3291                      target_vec, count, send);
3292     if (vec == NULL) {
3293         ret = -host_to_target_errno(errno);
3294         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3295         if (!send || ret) {
3296             goto out2;
3297         }
3298     }
3299     msg.msg_iovlen = count;
3300     msg.msg_iov = vec;
3301 
3302     if (send) {
3303         if (fd_trans_target_to_host_data(fd)) {
3304             void *host_msg;
3305 
3306             host_msg = g_malloc(msg.msg_iov->iov_len);
3307             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3308             ret = fd_trans_target_to_host_data(fd)(host_msg,
3309                                                    msg.msg_iov->iov_len);
3310             if (ret >= 0) {
3311                 msg.msg_iov->iov_base = host_msg;
3312                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3313             }
3314             g_free(host_msg);
3315         } else {
3316             ret = target_to_host_cmsg(&msg, msgp);
3317             if (ret == 0) {
3318                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3319             }
3320         }
3321     } else {
3322         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3323         if (!is_error(ret)) {
3324             len = ret;
3325             if (fd_trans_host_to_target_data(fd)) {
3326                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3327                                                MIN(msg.msg_iov->iov_len, len));
3328             }
3329             if (!is_error(ret)) {
3330                 ret = host_to_target_cmsg(msgp, &msg);
3331             }
3332             if (!is_error(ret)) {
3333                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3334                 msgp->msg_flags = tswap32(msg.msg_flags);
3335                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3336                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3337                                     msg.msg_name, msg.msg_namelen);
3338                     if (ret) {
3339                         goto out;
3340                     }
3341                 }
3342 
3343                 ret = len;
3344             }
3345         }
3346     }
3347 
3348 out:
3349     if (vec) {
3350         unlock_iovec(vec, target_vec, count, !send);
3351     }
3352 out2:
3353     return ret;
3354 }
3355 
3356 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3357                                int flags, int send)
3358 {
3359     abi_long ret;
3360     struct target_msghdr *msgp;
3361 
3362     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3363                           msgp,
3364                           target_msg,
3365                           send ? 1 : 0)) {
3366         return -TARGET_EFAULT;
3367     }
3368     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3369     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3370     return ret;
3371 }
3372 
3373 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3374  * so it might not have this *mmsg-specific flag either.
3375  */
3376 #ifndef MSG_WAITFORONE
3377 #define MSG_WAITFORONE 0x10000
3378 #endif
3379 
3380 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3381                                 unsigned int vlen, unsigned int flags,
3382                                 int send)
3383 {
3384     struct target_mmsghdr *mmsgp;
3385     abi_long ret = 0;
3386     int i;
3387 
3388     if (vlen > UIO_MAXIOV) {
3389         vlen = UIO_MAXIOV;
3390     }
3391 
3392     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3393     if (!mmsgp) {
3394         return -TARGET_EFAULT;
3395     }
3396 
3397     for (i = 0; i < vlen; i++) {
3398         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3399         if (is_error(ret)) {
3400             break;
3401         }
3402         mmsgp[i].msg_len = tswap32(ret);
3403         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3404         if (flags & MSG_WAITFORONE) {
3405             flags |= MSG_DONTWAIT;
3406         }
3407     }
3408 
3409     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3410 
3411     /* Return number of datagrams sent if we sent any at all;
3412      * otherwise return the error.
3413      */
3414     if (i) {
3415         return i;
3416     }
3417     return ret;
3418 }
3419 
3420 /* do_accept4() Must return target values and target errnos. */
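/* The guest addrlen is read and validated up front; on success the host
 * sockaddr is copied back truncated to the guest's buffer size, while the
 * (possibly larger) returned length is written to the addrlen word, matching
 * kernel accept4() semantics.
 */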
3421 static abi_long do_accept4(int fd, abi_ulong target_addr,
3422                            abi_ulong target_addrlen_addr, int flags)
3423 {
3424     socklen_t addrlen, ret_addrlen;
3425     void *addr;
3426     abi_long ret;
3427     int host_flags;
3428 
3429     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3430         return -TARGET_EINVAL;
3431     }
3432 
3433     host_flags = 0;
3434     if (flags & TARGET_SOCK_NONBLOCK) {
3435         host_flags |= SOCK_NONBLOCK;
3436     }
3437     if (flags & TARGET_SOCK_CLOEXEC) {
3438         host_flags |= SOCK_CLOEXEC;
3439     }
3440 
3441     if (target_addr == 0) {
3442         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3443     }
3444 
3445     /* linux returns EFAULT if addrlen pointer is invalid */
3446     if (get_user_u32(addrlen, target_addrlen_addr))
3447         return -TARGET_EFAULT;
3448 
3449     if ((int)addrlen < 0) {
3450         return -TARGET_EINVAL;
3451     }
3452 
3453     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3454         return -TARGET_EFAULT;
3455     }
3456 
3457     addr = alloca(addrlen);
3458 
3459     ret_addrlen = addrlen;
3460     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3461     if (!is_error(ret)) {
3462         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3463         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3464             ret = -TARGET_EFAULT;
3465         }
3466     }
3467     return ret;
3468 }
3469 
3470 /* do_getpeername() Must return target values and target errnos. */
3471 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3472                                abi_ulong target_addrlen_addr)
3473 {
3474     socklen_t addrlen, ret_addrlen;
3475     void *addr;
3476     abi_long ret;
3477 
3478     if (get_user_u32(addrlen, target_addrlen_addr))
3479         return -TARGET_EFAULT;
3480 
3481     if ((int)addrlen < 0) {
3482         return -TARGET_EINVAL;
3483     }
3484 
3485     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3486         return -TARGET_EFAULT;
3487     }
3488 
3489     addr = alloca(addrlen);
3490 
3491     ret_addrlen = addrlen;
3492     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3493     if (!is_error(ret)) {
3494         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3495         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3496             ret = -TARGET_EFAULT;
3497         }
3498     }
3499     return ret;
3500 }
3501 
3502 /* do_getsockname() Must return target values and target errnos. */
3503 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3504                                abi_ulong target_addrlen_addr)
3505 {
3506     socklen_t addrlen, ret_addrlen;
3507     void *addr;
3508     abi_long ret;
3509 
3510     if (get_user_u32(addrlen, target_addrlen_addr))
3511         return -TARGET_EFAULT;
3512 
3513     if ((int)addrlen < 0) {
3514         return -TARGET_EINVAL;
3515     }
3516 
3517     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3518         return -TARGET_EFAULT;
3519     }
3520 
3521     addr = alloca(addrlen);
3522 
3523     ret_addrlen = addrlen;
3524     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3525     if (!is_error(ret)) {
3526         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3527         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3528             ret = -TARGET_EFAULT;
3529         }
3530     }
3531     return ret;
3532 }
3533 
3534 /* do_socketpair() Must return target values and target errnos. */
3535 static abi_long do_socketpair(int domain, int type, int protocol,
3536                               abi_ulong target_tab_addr)
3537 {
3538     int tab[2];
3539     abi_long ret;
3540 
3541     target_to_host_sock_type(&type);
3542 
3543     ret = get_errno(socketpair(domain, type, protocol, tab));
3544     if (!is_error(ret)) {
3545         if (put_user_s32(tab[0], target_tab_addr)
3546             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3547             ret = -TARGET_EFAULT;
3548     }
3549     return ret;
3550 }
3551 
3552 /* do_sendto() Must return target values and target errnos. */
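/* If an fd_trans_target_to_host_data() hook is registered for this fd
 * (e.g. for netlink sockets), the payload is converted in a private copy so
 * the guest's buffer is left untouched.
 */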
3553 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3554                           abi_ulong target_addr, socklen_t addrlen)
3555 {
3556     void *addr;
3557     void *host_msg;
3558     void *copy_msg = NULL;
3559     abi_long ret;
3560 
3561     if ((int)addrlen < 0) {
3562         return -TARGET_EINVAL;
3563     }
3564 
3565     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3566     if (!host_msg)
3567         return -TARGET_EFAULT;
3568     if (fd_trans_target_to_host_data(fd)) {
3569         copy_msg = host_msg;
3570         host_msg = g_malloc(len);
3571         memcpy(host_msg, copy_msg, len);
3572         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3573         if (ret < 0) {
3574             goto fail;
3575         }
3576     }
3577     if (target_addr) {
3578         addr = alloca(addrlen+1);
3579         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3580         if (ret) {
3581             goto fail;
3582         }
3583         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3584     } else {
3585         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3586     }
3587 fail:
3588     if (copy_msg) {
3589         g_free(host_msg);
3590         host_msg = copy_msg;
3591     }
3592     unlock_user(host_msg, msg, 0);
3593     return ret;
3594 }
3595 
3596 /* do_recvfrom() Must return target values and target errnos. */
3597 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3598                             abi_ulong target_addr,
3599                             abi_ulong target_addrlen)
3600 {
3601     socklen_t addrlen, ret_addrlen;
3602     void *addr;
3603     void *host_msg;
3604     abi_long ret;
3605 
3606     if (!msg) {
3607         host_msg = NULL;
3608     } else {
3609         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3610         if (!host_msg) {
3611             return -TARGET_EFAULT;
3612         }
3613     }
3614     if (target_addr) {
3615         if (get_user_u32(addrlen, target_addrlen)) {
3616             ret = -TARGET_EFAULT;
3617             goto fail;
3618         }
3619         if ((int)addrlen < 0) {
3620             ret = -TARGET_EINVAL;
3621             goto fail;
3622         }
3623         addr = alloca(addrlen);
3624         ret_addrlen = addrlen;
3625         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3626                                       addr, &ret_addrlen));
3627     } else {
3628         addr = NULL; /* To keep compiler quiet.  */
3629         addrlen = 0; /* To keep compiler quiet.  */
3630         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3631     }
3632     if (!is_error(ret)) {
3633         if (fd_trans_host_to_target_data(fd)) {
3634             abi_long trans;
3635             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3636             if (is_error(trans)) {
3637                 ret = trans;
3638                 goto fail;
3639             }
3640         }
3641         if (target_addr) {
3642             host_to_target_sockaddr(target_addr, addr,
3643                                     MIN(addrlen, ret_addrlen));
3644             if (put_user_u32(ret_addrlen, target_addrlen)) {
3645                 ret = -TARGET_EFAULT;
3646                 goto fail;
3647             }
3648         }
3649         unlock_user(host_msg, msg, len);
3650     } else {
3651 fail:
3652         unlock_user(host_msg, msg, 0);
3653     }
3654     return ret;
3655 }
3656 
3657 #ifdef TARGET_NR_socketcall
3658 /* do_socketcall() must return target values and target errnos. */
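/*
 * The multiplexed arguments live in guest memory at vptr as an array of
 * abi_longs; e.g. a guest socket(2) arrives as
 *     socketcall(TARGET_SYS_SOCKET, vptr), vptr -> { domain, type, protocol }
 * and the loop below copies nargs[num] of them into a[] before dispatching.
 */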
3659 static abi_long do_socketcall(int num, abi_ulong vptr)
3660 {
3661     static const unsigned nargs[] = { /* number of arguments per operation */
3662         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3663         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3664         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3665         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3666         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3667         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3668         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3669         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3670         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3671         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3672         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3673         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3674         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3675         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3676         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3677         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3678         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3679         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3680         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3681         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3682     };
3683     abi_long a[6]; /* max 6 args */
3684     unsigned i;
3685 
3686     /* check the range of the first argument num */
3687     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3688     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3689         return -TARGET_EINVAL;
3690     }
3691     /* ensure we have space for args */
3692     if (nargs[num] > ARRAY_SIZE(a)) {
3693         return -TARGET_EINVAL;
3694     }
3695     /* collect the arguments in a[] according to nargs[] */
3696     for (i = 0; i < nargs[num]; ++i) {
3697         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3698             return -TARGET_EFAULT;
3699         }
3700     }
3701     /* now when we have the args, invoke the appropriate underlying function */
3702     switch (num) {
3703     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3704         return do_socket(a[0], a[1], a[2]);
3705     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3706         return do_bind(a[0], a[1], a[2]);
3707     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3708         return do_connect(a[0], a[1], a[2]);
3709     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3710         return get_errno(listen(a[0], a[1]));
3711     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3712         return do_accept4(a[0], a[1], a[2], 0);
3713     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3714         return do_getsockname(a[0], a[1], a[2]);
3715     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3716         return do_getpeername(a[0], a[1], a[2]);
3717     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3718         return do_socketpair(a[0], a[1], a[2], a[3]);
3719     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3720         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3721     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3722         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3723     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3724         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3725     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3726         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3727     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3728         return get_errno(shutdown(a[0], a[1]));
3729     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3730         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3731     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3732         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3733     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3734         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3735     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3736         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3737     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3738         return do_accept4(a[0], a[1], a[2], a[3]);
3739     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3740         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3741     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3742         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3743     default:
3744         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3745         return -TARGET_EINVAL;
3746     }
3747 }
3748 #endif
3749 
3750 #ifndef TARGET_SEMID64_DS
3751 /* asm-generic version of this struct */
3752 struct target_semid64_ds
3753 {
3754   struct target_ipc_perm sem_perm;
3755   abi_ulong sem_otime;
3756 #if TARGET_ABI_BITS == 32
3757   abi_ulong __unused1;
3758 #endif
3759   abi_ulong sem_ctime;
3760 #if TARGET_ABI_BITS == 32
3761   abi_ulong __unused2;
3762 #endif
3763   abi_ulong sem_nsems;
3764   abi_ulong __unused3;
3765   abi_ulong __unused4;
3766 };
3767 #endif
3768 
3769 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3770                                                abi_ulong target_addr)
3771 {
3772     struct target_ipc_perm *target_ip;
3773     struct target_semid64_ds *target_sd;
3774 
3775     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3776         return -TARGET_EFAULT;
3777     target_ip = &(target_sd->sem_perm);
3778     host_ip->__key = tswap32(target_ip->__key);
3779     host_ip->uid = tswap32(target_ip->uid);
3780     host_ip->gid = tswap32(target_ip->gid);
3781     host_ip->cuid = tswap32(target_ip->cuid);
3782     host_ip->cgid = tswap32(target_ip->cgid);
3783 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3784     host_ip->mode = tswap32(target_ip->mode);
3785 #else
3786     host_ip->mode = tswap16(target_ip->mode);
3787 #endif
3788 #if defined(TARGET_PPC)
3789     host_ip->__seq = tswap32(target_ip->__seq);
3790 #else
3791     host_ip->__seq = tswap16(target_ip->__seq);
3792 #endif
3793     unlock_user_struct(target_sd, target_addr, 0);
3794     return 0;
3795 }
3796 
3797 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3798                                                struct ipc_perm *host_ip)
3799 {
3800     struct target_ipc_perm *target_ip;
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804         return -TARGET_EFAULT;
3805     target_ip = &(target_sd->sem_perm);
3806     target_ip->__key = tswap32(host_ip->__key);
3807     target_ip->uid = tswap32(host_ip->uid);
3808     target_ip->gid = tswap32(host_ip->gid);
3809     target_ip->cuid = tswap32(host_ip->cuid);
3810     target_ip->cgid = tswap32(host_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812     target_ip->mode = tswap32(host_ip->mode);
3813 #else
3814     target_ip->mode = tswap16(host_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817     target_ip->__seq = tswap32(host_ip->__seq);
3818 #else
3819     target_ip->__seq = tswap16(host_ip->__seq);
3820 #endif
3821     unlock_user_struct(target_sd, target_addr, 1);
3822     return 0;
3823 }
3824 
3825 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3826                                                abi_ulong target_addr)
3827 {
3828     struct target_semid64_ds *target_sd;
3829 
3830     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831         return -TARGET_EFAULT;
3832     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3833         return -TARGET_EFAULT;
3834     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3835     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3836     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3837     unlock_user_struct(target_sd, target_addr, 0);
3838     return 0;
3839 }
3840 
3841 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3842                                                struct semid_ds *host_sd)
3843 {
3844     struct target_semid64_ds *target_sd;
3845 
3846     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3847         return -TARGET_EFAULT;
3848     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3849         return -TARGET_EFAULT;
3850     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3851     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3852     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3853     unlock_user_struct(target_sd, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 struct target_seminfo {
3858     int semmap;
3859     int semmni;
3860     int semmns;
3861     int semmnu;
3862     int semmsl;
3863     int semopm;
3864     int semume;
3865     int semusz;
3866     int semvmx;
3867     int semaem;
3868 };
3869 
3870 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871                                               struct seminfo *host_seminfo)
3872 {
3873     struct target_seminfo *target_seminfo;
3874     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875         return -TARGET_EFAULT;
3876     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882     __put_user(host_seminfo->semume, &target_seminfo->semume);
3883     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886     unlock_user_struct(target_seminfo, target_addr, 1);
3887     return 0;
3888 }
3889 
3890 union semun {
3891 	int val;
3892     int val;
3893     struct semid_ds *buf;
3894     unsigned short *array;
3895     struct seminfo *__buf;
3896 
3897 union target_semun {
3898 	int val;
3899     int val;
3900     abi_ulong buf;
3901     abi_ulong array;
3902     abi_ulong __buf;
3903 
3904 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905                                                abi_ulong target_addr)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     *host_array = g_try_new(unsigned short, nsems);
3922     if (!*host_array) {
3923         return -TARGET_ENOMEM;
3924     }
3925     array = lock_user(VERIFY_READ, target_addr,
3926                       nsems*sizeof(unsigned short), 1);
3927     if (!array) {
3928         g_free(*host_array);
3929         return -TARGET_EFAULT;
3930     }
3931 
3932     for (i = 0; i < nsems; i++) {
3933         __get_user((*host_array)[i], &array[i]);
3934     }
3935     unlock_user(array, target_addr, 0);
3936 
3937     return 0;
3938 }
3939 
3940 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941                                                unsigned short **host_array)
3942 {
3943     int nsems;
3944     unsigned short *array;
3945     union semun semun;
3946     struct semid_ds semid_ds;
3947     int i, ret;
3948 
3949     semun.buf = &semid_ds;
3950 
3951     ret = semctl(semid, 0, IPC_STAT, semun);
3952     if (ret == -1)
3953         return get_errno(ret);
3954 
3955     nsems = semid_ds.sem_nsems;
3956 
3957     array = lock_user(VERIFY_WRITE, target_addr,
3958                       nsems*sizeof(unsigned short), 0);
3959     if (!array)
3960         return -TARGET_EFAULT;
3961 
3962     for (i = 0; i < nsems; i++) {
3963         __put_user((*host_array)[i], &array[i]);
3964     }
3965     g_free(*host_array);
3966     unlock_user(array, target_addr, 1);
3967 
3968     return 0;
3969 }
3970 
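/*
 * semctl() emulation.  The final argument is a union semun passed by value;
 * target_su holds the raw target representation and each command converts
 * only the member it actually uses (val, array, buf or __buf) into host
 * form around the host semctl() call.
 */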
3971 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972                                  abi_ulong target_arg)
3973 {
3974     union target_semun target_su = { .buf = target_arg };
3975     union semun arg;
3976     struct semid_ds dsarg;
3977     unsigned short *array = NULL;
3978     struct seminfo seminfo;
3979     abi_long ret = -TARGET_EINVAL;
3980     abi_long err;
3981     cmd &= 0xff;
3982 
3983     switch( cmd ) {
3984     switch (cmd) {
3985     case GETVAL:
3986     case SETVAL:
3987         /* In 64 bit cross-endian situations, we will erroneously pick up
3988          * the wrong half of the union for the "val" element.  To rectify
3989          * this, the entire 8-byte structure is byteswapped, followed by
3990          * a swap of the 4 byte val field. In other cases, the data is
3991          * already in proper host byte order. */
3992         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3993             target_su.buf = tswapal(target_su.buf);
3994             arg.val = tswap32(target_su.val);
3995         } else {
3996             arg.val = target_su.val;
3997         }
3998         ret = get_errno(semctl(semid, semnum, cmd, arg));
3999         break;
4000     case GETALL:
4001     case SETALL:
4002         err = target_to_host_semarray(semid, &array, target_su.array);
4003         if (err)
4004             return err;
4005         arg.array = array;
4006         ret = get_errno(semctl(semid, semnum, cmd, arg));
4007         err = host_to_target_semarray(semid, target_su.array, &array);
4008         if (err)
4009             return err;
4010         break;
4011     case IPC_STAT:
4012     case IPC_SET:
4013     case SEM_STAT:
4014         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4015         if (err)
4016             return err;
4017         arg.buf = &dsarg;
4018         ret = get_errno(semctl(semid, semnum, cmd, arg));
4019         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4020         if (err)
4021             return err;
4022         break;
4023     case IPC_INFO:
4024     case SEM_INFO:
4025         arg.__buf = &seminfo;
4026         ret = get_errno(semctl(semid, semnum, cmd, arg));
4027         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4028         if (err)
4029             return err;
4030         break;
4031     case IPC_RMID:
4032     case GETPID:
4033     case GETNCNT:
4034     case GETZCNT:
4035         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4036         break;
4037     }
4038     return ret;
4039 }
4040 
4041 struct target_sembuf {
4042     unsigned short sem_num;
4043     short sem_op;
4044     short sem_flg;
4045 };
4046 
4047 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048                                              abi_ulong target_addr,
4049                                              unsigned nsops)
4050 {
4051     struct target_sembuf *target_sembuf;
4052     int i;
4053 
4054     target_sembuf = lock_user(VERIFY_READ, target_addr,
4055                               nsops*sizeof(struct target_sembuf), 1);
4056     if (!target_sembuf)
4057         return -TARGET_EFAULT;
4058 
4059     for (i = 0; i < nsops; i++) {
4060         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063     }
4064 
4065     unlock_user(target_sembuf, target_addr, 0);
4066 
4067     return 0;
4068 }
4069 
4070 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072 
4073 /*
4074  * This macro is required to handle the s390 variants, which pass the
4075  * arguments in a different order than the default.
4076  */
4077 #ifdef __s390x__
4078 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079   (__nsops), (__timeout), (__sops)
4080 #else
4081 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082   (__nsops), 0, (__sops), (__timeout)
4083 #endif
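/*
 * For reference, with the default (non-s390x) layout the ipc(2) fallback
 * below expands to
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 * i.e. the timeout travels in the "fifth" slot, whereas s390x passes it as
 * the third argument and has no unused slot.
 */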
4084 
4085 static inline abi_long do_semtimedop(int semid,
4086                                      abi_long ptr,
4087                                      unsigned nsops,
4088                                      abi_long timeout, bool time64)
4089 {
4090     struct sembuf *sops;
4091     struct timespec ts, *pts = NULL;
4092     abi_long ret;
4093 
4094     if (timeout) {
4095         pts = &ts;
4096         if (time64) {
4097             if (target_to_host_timespec64(pts, timeout)) {
4098                 return -TARGET_EFAULT;
4099             }
4100         } else {
4101             if (target_to_host_timespec(pts, timeout)) {
4102                 return -TARGET_EFAULT;
4103             }
4104         }
4105     }
4106 
4107     if (nsops > TARGET_SEMOPM) {
4108         return -TARGET_E2BIG;
4109     }
4110 
4111     sops = g_new(struct sembuf, nsops);
4112 
4113     if (target_to_host_sembuf(sops, ptr, nsops)) {
4114         g_free(sops);
4115         return -TARGET_EFAULT;
4116     }
4117 
4118     ret = -TARGET_ENOSYS;
4119 #ifdef __NR_semtimedop
4120     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4121 #endif
4122 #ifdef __NR_ipc
4123     if (ret == -TARGET_ENOSYS) {
4124         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4125                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4126     }
4127 #endif
4128     g_free(sops);
4129     return ret;
4130 }
4131 #endif
4132 
4133 struct target_msqid_ds
4134 {
4135     struct target_ipc_perm msg_perm;
4136     abi_ulong msg_stime;
4137 #if TARGET_ABI_BITS == 32
4138     abi_ulong __unused1;
4139 #endif
4140     abi_ulong msg_rtime;
4141 #if TARGET_ABI_BITS == 32
4142     abi_ulong __unused2;
4143 #endif
4144     abi_ulong msg_ctime;
4145 #if TARGET_ABI_BITS == 32
4146     abi_ulong __unused3;
4147 #endif
4148     abi_ulong __msg_cbytes;
4149     abi_ulong msg_qnum;
4150     abi_ulong msg_qbytes;
4151     abi_ulong msg_lspid;
4152     abi_ulong msg_lrpid;
4153     abi_ulong __unused4;
4154     abi_ulong __unused5;
4155 };
4156 
4157 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4158                                                abi_ulong target_addr)
4159 {
4160     struct target_msqid_ds *target_md;
4161 
4162     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4163         return -TARGET_EFAULT;
4164     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4165         return -TARGET_EFAULT;
4166     host_md->msg_stime = tswapal(target_md->msg_stime);
4167     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4168     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4169     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4170     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4171     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4172     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4173     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4174     unlock_user_struct(target_md, target_addr, 0);
4175     return 0;
4176 }
4177 
4178 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4179                                                struct msqid_ds *host_md)
4180 {
4181     struct target_msqid_ds *target_md;
4182 
4183     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4184         return -TARGET_EFAULT;
4185     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4186         return -TARGET_EFAULT;
4187     target_md->msg_stime = tswapal(host_md->msg_stime);
4188     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4189     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4190     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4191     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4192     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4193     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4194     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4195     unlock_user_struct(target_md, target_addr, 1);
4196     return 0;
4197 }
4198 
4199 struct target_msginfo {
4200     int msgpool;
4201     int msgmap;
4202     int msgmax;
4203     int msgmnb;
4204     int msgmni;
4205     int msgssz;
4206     int msgtql;
4207     unsigned short int msgseg;
4208 };
4209 
4210 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211                                               struct msginfo *host_msginfo)
4212 {
4213     struct target_msginfo *target_msginfo;
4214     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215         return -TARGET_EFAULT;
4216     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224     unlock_user_struct(target_msginfo, target_addr, 1);
4225     return 0;
4226 }
4227 
4228 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4229 {
4230     struct msqid_ds dsarg;
4231     struct msginfo msginfo;
4232     abi_long ret = -TARGET_EINVAL;
4233 
4234     cmd &= 0xff;
4235 
4236     switch (cmd) {
4237     case IPC_STAT:
4238     case IPC_SET:
4239     case MSG_STAT:
4240         if (target_to_host_msqid_ds(&dsarg,ptr))
4241             return -TARGET_EFAULT;
4242         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4243         if (host_to_target_msqid_ds(ptr,&dsarg))
4244             return -TARGET_EFAULT;
4245         break;
4246     case IPC_RMID:
4247         ret = get_errno(msgctl(msgid, cmd, NULL));
4248         break;
4249     case IPC_INFO:
4250     case MSG_INFO:
4251         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4252         if (host_to_target_msginfo(ptr, &msginfo))
4253             return -TARGET_EFAULT;
4254         break;
4255     }
4256 
4257     return ret;
4258 }
4259 
4260 struct target_msgbuf {
4261     abi_long mtype;
4262     char	mtext[1];
4263     char mtext[1];
4264 
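/* The target struct msgbuf starts with an abi_long mtype while the host
 * expects a long, so messages are staged in a host-allocated buffer with
 * mtype byteswapped and mtext copied verbatim.
 */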
4265 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4266                                  ssize_t msgsz, int msgflg)
4267 {
4268     struct target_msgbuf *target_mb;
4269     struct msgbuf *host_mb;
4270     abi_long ret = 0;
4271 
4272     if (msgsz < 0) {
4273         return -TARGET_EINVAL;
4274     }
4275 
4276     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4277         return -TARGET_EFAULT;
4278     host_mb = g_try_malloc(msgsz + sizeof(long));
4279     if (!host_mb) {
4280         unlock_user_struct(target_mb, msgp, 0);
4281         return -TARGET_ENOMEM;
4282     }
4283     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4284     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4285     ret = -TARGET_ENOSYS;
4286 #ifdef __NR_msgsnd
4287     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4288 #endif
4289 #ifdef __NR_ipc
4290     if (ret == -TARGET_ENOSYS) {
4291 #ifdef __s390x__
4292         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4293                                  host_mb));
4294 #else
4295         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4296                                  host_mb, 0));
4297 #endif
4298     }
4299 #endif
4300     g_free(host_mb);
4301     unlock_user_struct(target_mb, msgp, 0);
4302 
4303     return ret;
4304 }
4305 
4306 #ifdef __NR_ipc
4307 #if defined(__sparc__)
4308 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4309 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4310 #elif defined(__s390x__)
4311 /* The s390 sys_ipc variant has only five parameters.  */
4312 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4313     ((long int[]){(long int)__msgp, __msgtyp})
4314 #else
4315 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4316     ((long int[]){(long int)__msgp, __msgtyp}), 0
4317 #endif
4318 #endif
4319 
4320 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4321                                  ssize_t msgsz, abi_long msgtyp,
4322                                  int msgflg)
4323 {
4324     struct target_msgbuf *target_mb;
4325     char *target_mtext;
4326     struct msgbuf *host_mb;
4327     abi_long ret = 0;
4328 
4329     if (msgsz < 0) {
4330         return -TARGET_EINVAL;
4331     }
4332 
4333     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4334         return -TARGET_EFAULT;
4335 
4336     host_mb = g_try_malloc(msgsz + sizeof(long));
4337     if (!host_mb) {
4338         ret = -TARGET_ENOMEM;
4339         goto end;
4340     }
4341     ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgrcv
4343     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346     if (ret == -TARGET_ENOSYS) {
4347         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4348                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4349     }
4350 #endif
4351 
4352     if (ret > 0) {
4353         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4354         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4355         if (!target_mtext) {
4356             ret = -TARGET_EFAULT;
4357             goto end;
4358         }
4359         memcpy(target_mb->mtext, host_mb->mtext, ret);
4360         unlock_user(target_mtext, target_mtext_addr, ret);
4361     }
4362 
4363     target_mb->mtype = tswapal(host_mb->mtype);
4364 
4365 end:
4366     if (target_mb)
4367         unlock_user_struct(target_mb, msgp, 1);
4368     g_free(host_mb);
4369     return ret;
4370 }
4371 
4372 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373                                                abi_ulong target_addr)
4374 {
4375     struct target_shmid_ds *target_sd;
4376 
4377     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378         return -TARGET_EFAULT;
4379     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380         return -TARGET_EFAULT;
4381     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388     unlock_user_struct(target_sd, target_addr, 0);
4389     return 0;
4390 }
4391 
4392 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393                                                struct shmid_ds *host_sd)
4394 {
4395     struct target_shmid_ds *target_sd;
4396 
4397     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398         return -TARGET_EFAULT;
4399     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400         return -TARGET_EFAULT;
4401     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408     unlock_user_struct(target_sd, target_addr, 1);
4409     return 0;
4410 }
4411 
4412 struct  target_shminfo {
4413     abi_ulong shmmax;
4414     abi_ulong shmmin;
4415     abi_ulong shmmni;
4416     abi_ulong shmseg;
4417     abi_ulong shmall;
4418 };
4419 
4420 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421                                               struct shminfo *host_shminfo)
4422 {
4423     struct target_shminfo *target_shminfo;
4424     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425         return -TARGET_EFAULT;
4426     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431     unlock_user_struct(target_shminfo, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 struct target_shm_info {
4436     int used_ids;
4437     abi_ulong shm_tot;
4438     abi_ulong shm_rss;
4439     abi_ulong shm_swp;
4440     abi_ulong swap_attempts;
4441     abi_ulong swap_successes;
4442 };
4443 
4444 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445                                                struct shm_info *host_shm_info)
4446 {
4447     struct target_shm_info *target_shm_info;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456     unlock_user_struct(target_shm_info, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4461 {
4462     struct shmid_ds dsarg;
4463     struct shminfo shminfo;
4464     struct shm_info shm_info;
4465     abi_long ret = -TARGET_EINVAL;
4466 
4467     cmd &= 0xff;
4468 
4469     switch (cmd) {
4470     case IPC_STAT:
4471     case IPC_SET:
4472     case SHM_STAT:
4473         if (target_to_host_shmid_ds(&dsarg, buf))
4474             return -TARGET_EFAULT;
4475         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4476         if (host_to_target_shmid_ds(buf, &dsarg))
4477             return -TARGET_EFAULT;
4478         break;
4479     case IPC_INFO:
4480         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4481         if (host_to_target_shminfo(buf, &shminfo))
4482             return -TARGET_EFAULT;
4483         break;
4484     case SHM_INFO:
4485         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4486         if (host_to_target_shm_info(buf, &shm_info))
4487             return -TARGET_EFAULT;
4488         break;
4489     case IPC_RMID:
4490     case SHM_LOCK:
4491     case SHM_UNLOCK:
4492         ret = get_errno(shmctl(shmid, cmd, NULL));
4493         break;
4494     }
4495 
4496     return ret;
4497 }
4498 
4499 #ifdef TARGET_NR_ipc
4500 /* ??? This only works with linear mappings.  */
4501 /* do_ipc() must return target values and target errnos. */
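/* The low 16 bits of 'call' select the IPCOP_* operation and the high 16
 * bits carry the IPC interface version; version 0 of IPCOP_msgrcv passes
 * msgp/msgtyp indirectly through a small kludge structure (see below).
 */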
4502 static abi_long do_ipc(CPUArchState *cpu_env,
4503                        unsigned int call, abi_long first,
4504                        abi_long second, abi_long third,
4505                        abi_long ptr, abi_long fifth)
4506 {
4507     int version;
4508     abi_long ret = 0;
4509 
4510     version = call >> 16;
4511     call &= 0xffff;
4512 
4513     switch (call) {
4514     case IPCOP_semop:
4515         ret = do_semtimedop(first, ptr, second, 0, false);
4516         break;
4517     case IPCOP_semtimedop:
4518     /*
4519      * The s390 sys_ipc variant has only five parameters instead of six
4520      * (as in the default variant); the only difference is the handling of
4521      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4522      * to a struct timespec while the generic variant uses the fifth parameter.
4523      */
4524 #if defined(TARGET_S390X)
4525         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4526 #else
4527         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4528 #endif
4529         break;
4530 
4531     case IPCOP_semget:
4532         ret = get_errno(semget(first, second, third));
4533         break;
4534 
4535     case IPCOP_semctl: {
4536         /* The semun argument to semctl is passed by value, so dereference the
4537          * ptr argument. */
4538         abi_ulong atptr;
4539         get_user_ual(atptr, ptr);
4540         ret = do_semctl(first, second, third, atptr);
4541         break;
4542     }
4543 
4544     case IPCOP_msgget:
4545         ret = get_errno(msgget(first, second));
4546         break;
4547 
4548     case IPCOP_msgsnd:
4549         ret = do_msgsnd(first, ptr, second, third);
4550         break;
4551 
4552     case IPCOP_msgctl:
4553         ret = do_msgctl(first, second, ptr);
4554         break;
4555 
4556     case IPCOP_msgrcv:
4557         switch (version) {
4558         case 0:
4559             {
4560                 struct target_ipc_kludge {
4561                     abi_long msgp;
4562                     abi_long msgtyp;
4563                 } *tmp;
4564 
4565                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4566                     ret = -TARGET_EFAULT;
4567                     break;
4568                 }
4569 
4570                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4571 
4572                 unlock_user_struct(tmp, ptr, 0);
4573                 break;
4574             }
4575         default:
4576             ret = do_msgrcv(first, ptr, second, fifth, third);
4577         }
4578         break;
4579 
4580     case IPCOP_shmat:
4581         switch (version) {
4582         default:
4583         {
4584             abi_ulong raddr;
4585             raddr = target_shmat(cpu_env, first, ptr, second);
4586             if (is_error(raddr))
4587                 return get_errno(raddr);
4588             if (put_user_ual(raddr, third))
4589                 return -TARGET_EFAULT;
4590             break;
4591         }
4592         case 1:
4593             ret = -TARGET_EINVAL;
4594             break;
4595         }
4596         break;
4597     case IPCOP_shmdt:
4598         ret = target_shmdt(ptr);
4599         break;
4600 
4601     case IPCOP_shmget:
4602         /* IPC_* flag values are the same on all linux platforms */
4603         ret = get_errno(shmget(first, second, third));
4604         break;
4605 
4606     /* IPC_* and SHM_* command values are the same on all linux platforms */
4607     case IPCOP_shmctl:
4608         ret = do_shmctl(first, second, ptr);
4609         break;
4610     default:
4611         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4612                       call, version);
4613         ret = -TARGET_ENOSYS;
4614         break;
4615     }
4616     return ret;
4617 }
4618 #endif
4619 
4620 /* kernel structure types definitions */
4621 
4622 #define STRUCT(name, ...) STRUCT_ ## name,
4623 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4624 enum {
4625 #include "syscall_types.h"
4626 STRUCT_MAX
4627 };
4628 #undef STRUCT
4629 #undef STRUCT_SPECIAL
4630 
4631 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4632 #define STRUCT_SPECIAL(name)
4633 #include "syscall_types.h"
4634 #undef STRUCT
4635 #undef STRUCT_SPECIAL
4636 
4637 #define MAX_STRUCT_SIZE 4096
4638 
4639 #ifdef CONFIG_FIEMAP
4640 /* So fiemap access checks don't overflow on 32 bit systems.
4641  * This is very slightly smaller than the limit imposed by
4642  * the underlying kernel.
4643  */
4644 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4645                             / sizeof(struct fiemap_extent))
4646 
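/* If fm_extent_count is large enough that the result cannot fit in the
 * fixed-size scratch buffer (MAX_STRUCT_SIZE bytes), a temporary buffer of
 * the exact required size is allocated instead, and each returned
 * fiemap_extent is converted to target layout individually.
 */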
4647 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4648                                        int fd, int cmd, abi_long arg)
4649 {
4650     /* The parameter for this ioctl is a struct fiemap followed
4651      * by an array of struct fiemap_extent whose size is set
4652      * in fiemap->fm_extent_count. The array is filled in by the
4653      * ioctl.
4654      */
4655     int target_size_in, target_size_out;
4656     struct fiemap *fm;
4657     const argtype *arg_type = ie->arg_type;
4658     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4659     void *argptr, *p;
4660     abi_long ret;
4661     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4662     uint32_t outbufsz;
4663     int free_fm = 0;
4664 
4665     assert(arg_type[0] == TYPE_PTR);
4666     assert(ie->access == IOC_RW);
4667     arg_type++;
4668     target_size_in = thunk_type_size(arg_type, 0);
4669     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4670     if (!argptr) {
4671         return -TARGET_EFAULT;
4672     }
4673     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4674     unlock_user(argptr, arg, 0);
4675     fm = (struct fiemap *)buf_temp;
4676     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4677         return -TARGET_EINVAL;
4678     }
4679 
4680     outbufsz = sizeof (*fm) +
4681         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4682 
4683     if (outbufsz > MAX_STRUCT_SIZE) {
4684         /* We can't fit all the extents into the fixed size buffer.
4685          * Allocate one that is large enough and use it instead.
4686          */
4687         fm = g_try_malloc(outbufsz);
4688         if (!fm) {
4689             return -TARGET_ENOMEM;
4690         }
4691         memcpy(fm, buf_temp, sizeof(struct fiemap));
4692         free_fm = 1;
4693     }
4694     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4695     if (!is_error(ret)) {
4696         target_size_out = target_size_in;
4697         /* An extent_count of 0 means we were only counting the extents
4698          * so there are no structs to copy
4699          */
4700         if (fm->fm_extent_count != 0) {
4701             target_size_out += fm->fm_mapped_extents * extent_size;
4702         }
4703         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4704         if (!argptr) {
4705             ret = -TARGET_EFAULT;
4706         } else {
4707             /* Convert the struct fiemap */
4708             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4709             if (fm->fm_extent_count != 0) {
4710                 p = argptr + target_size_in;
4711                 /* ...and then all the struct fiemap_extents */
4712                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4713                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4714                                   THUNK_TARGET);
4715                     p += extent_size;
4716                 }
4717             }
4718             unlock_user(argptr, arg, target_size_out);
4719         }
4720     }
4721     if (free_fm) {
4722         g_free(fm);
4723     }
4724     return ret;
4725 }
4726 #endif
4727 
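/*
 * Handler for struct ifconf-based requests (SIOCGIFCONF): ifc_buf points
 * to an array of struct ifreq whose target and host layouts differ, so
 * the length and every ifreq entry are converted around the host ioctl.
 * A NULL ifc_buf is passed straight through, in which case the kernel
 * only reports the required length.
 */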
4728 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4729                                 int fd, int cmd, abi_long arg)
4730 {
4731     const argtype *arg_type = ie->arg_type;
4732     int target_size;
4733     void *argptr;
4734     int ret;
4735     struct ifconf *host_ifconf;
4736     uint32_t outbufsz;
4737     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4738     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4739     int target_ifreq_size;
4740     int nb_ifreq;
4741     int free_buf = 0;
4742     int i;
4743     int target_ifc_len;
4744     abi_long target_ifc_buf;
4745     int host_ifc_len;
4746     char *host_ifc_buf;
4747 
4748     assert(arg_type[0] == TYPE_PTR);
4749     assert(ie->access == IOC_RW);
4750 
4751     arg_type++;
4752     target_size = thunk_type_size(arg_type, 0);
4753 
4754     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4755     if (!argptr)
4756         return -TARGET_EFAULT;
4757     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4758     unlock_user(argptr, arg, 0);
4759 
4760     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4761     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4762     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4763 
4764     if (target_ifc_buf != 0) {
4765         target_ifc_len = host_ifconf->ifc_len;
4766         nb_ifreq = target_ifc_len / target_ifreq_size;
4767         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4768 
4769         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4770         if (outbufsz > MAX_STRUCT_SIZE) {
4771             /*
4772              * We can't fit all the ifreq entries into the fixed-size buffer.
4773              * Allocate one that is large enough and use it instead.
4774              */
4775             host_ifconf = g_try_malloc(outbufsz);
4776             if (!host_ifconf) {
4777                 return -TARGET_ENOMEM;
4778             }
4779             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4780             free_buf = 1;
4781         }
4782         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4783 
4784         host_ifconf->ifc_len = host_ifc_len;
4785     } else {
4786         host_ifc_buf = NULL;
4787     }
4788     host_ifconf->ifc_buf = host_ifc_buf;
4789 
4790     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4791     if (!is_error(ret)) {
4792         /* convert host ifc_len to target ifc_len */
4793 
4794         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4795         target_ifc_len = nb_ifreq * target_ifreq_size;
4796         host_ifconf->ifc_len = target_ifc_len;
4797 
4798         /* restore target ifc_buf */
4799 
4800         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4801 
4802         /* copy struct ifconf to target user */
4803 
4804         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4805         if (!argptr)
4806             return -TARGET_EFAULT;
4807         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4808         unlock_user(argptr, arg, target_size);
4809 
4810         if (target_ifc_buf != 0) {
4811             /* copy ifreq[] to target user */
4812             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4813             for (i = 0; i < nb_ifreq ; i++) {
4814                 thunk_convert(argptr + i * target_ifreq_size,
4815                               host_ifc_buf + i * sizeof(struct ifreq),
4816                               ifreq_arg_type, THUNK_TARGET);
4817             }
4818             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4819         }
4820     }
4821 
4822     if (free_buf) {
4823         g_free(host_ifconf);
4824     }
4825 
4826     return ret;
4827 }
4828 
4829 #if defined(CONFIG_USBFS)
4830 #if HOST_LONG_BITS > 64
4831 #error USBDEVFS thunks do not support >64 bit hosts yet.
4832 #endif
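/*
 * Book-keeping for a URB submitted to the host kernel: the guest
 * addresses of the usbdevfs_urb and of its data buffer, the locked host
 * pointer for that buffer, and the host copy of the URB itself.
 * Submitted URBs are tracked in a hash table keyed by the guest URB
 * address so that DISCARDURB and REAPURB can find them again.
 */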
4833 struct live_urb {
4834     uint64_t target_urb_adr;
4835     uint64_t target_buf_adr;
4836     char *target_buf_ptr;
4837     struct usbdevfs_urb host_urb;
4838 };
4839 
4840 static GHashTable *usbdevfs_urb_hashtable(void)
4841 {
4842     static GHashTable *urb_hashtable;
4843 
4844     if (!urb_hashtable) {
4845         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4846     }
4847     return urb_hashtable;
4848 }
4849 
4850 static void urb_hashtable_insert(struct live_urb *urb)
4851 {
4852     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4853     g_hash_table_insert(urb_hashtable, urb, urb);
4854 }
4855 
4856 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4857 {
4858     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4859     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4860 }
4861 
4862 static void urb_hashtable_remove(struct live_urb *urb)
4863 {
4864     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4865     g_hash_table_remove(urb_hashtable, urb);
4866 }
4867 
4868 static abi_long
4869 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4870                           int fd, int cmd, abi_long arg)
4871 {
4872     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4873     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4874     struct live_urb *lurb;
4875     void *argptr;
4876     uint64_t hurb;
4877     int target_size;
4878     uintptr_t target_urb_adr;
4879     abi_long ret;
4880 
4881     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4882 
4883     memset(buf_temp, 0, sizeof(uint64_t));
4884     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4885     if (is_error(ret)) {
4886         return ret;
4887     }
4888 
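    /*
     * The kernel hands back the host URB pointer we submitted, which is
     * &lurb->host_urb; step back by its offset to recover the enclosing
     * live_urb and its metadata.
     */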
4889     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4890     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4891     if (!lurb->target_urb_adr) {
4892         return -TARGET_EFAULT;
4893     }
4894     urb_hashtable_remove(lurb);
4895     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4896         lurb->host_urb.buffer_length);
4897     lurb->target_buf_ptr = NULL;
4898 
4899     /* restore the guest buffer pointer */
4900     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4901 
4902     /* update the guest urb struct */
4903     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4904     if (!argptr) {
4905         g_free(lurb);
4906         return -TARGET_EFAULT;
4907     }
4908     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4909     unlock_user(argptr, lurb->target_urb_adr, target_size);
4910 
4911     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4912     /* write back the urb handle */
4913     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4914     if (!argptr) {
4915         g_free(lurb);
4916         return -TARGET_EFAULT;
4917     }
4918 
4919     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4920     target_urb_adr = lurb->target_urb_adr;
4921     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4922     unlock_user(argptr, arg, target_size);
4923 
4924     g_free(lurb);
4925     return ret;
4926 }
4927 
4928 static abi_long
4929 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4930                              uint8_t *buf_temp __attribute__((unused)),
4931                              int fd, int cmd, abi_long arg)
4932 {
4933     struct live_urb *lurb;
4934 
4935     /* map target address back to host URB with metadata. */
4936     lurb = urb_hashtable_lookup(arg);
4937     if (!lurb) {
4938         return -TARGET_EFAULT;
4939     }
4940     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4941 }
4942 
4943 static abi_long
4944 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4945                             int fd, int cmd, abi_long arg)
4946 {
4947     const argtype *arg_type = ie->arg_type;
4948     int target_size;
4949     abi_long ret;
4950     void *argptr;
4951     int rw_dir;
4952     struct live_urb *lurb;
4953 
4954     /*
4955      * Each submitted URB needs to map to a unique ID for the
4956      * kernel, and that unique ID needs to be a pointer to
4957      * host memory.  Hence, we need to malloc for each URB.
4958      * Isochronous transfers have a variable-length struct.
4959      */
4960     arg_type++;
4961     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4962 
4963     /* construct host copy of urb and metadata */
4964     lurb = g_try_new0(struct live_urb, 1);
4965     if (!lurb) {
4966         return -TARGET_ENOMEM;
4967     }
4968 
4969     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4970     if (!argptr) {
4971         g_free(lurb);
4972         return -TARGET_EFAULT;
4973     }
4974     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4975     unlock_user(argptr, arg, 0);
4976 
4977     lurb->target_urb_adr = arg;
4978     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4979 
4980     /* buffer space used depends on endpoint type so lock the entire buffer */
4981     /* control type urbs should check the buffer contents for true direction */
4982     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4983     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4984         lurb->host_urb.buffer_length, 1);
4985     if (lurb->target_buf_ptr == NULL) {
4986         g_free(lurb);
4987         return -TARGET_EFAULT;
4988     }
4989 
4990     /* update buffer pointer in host copy */
4991     lurb->host_urb.buffer = lurb->target_buf_ptr;
4992 
4993     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4994     if (is_error(ret)) {
4995         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4996         g_free(lurb);
4997     } else {
4998         urb_hashtable_insert(lurb);
4999     }
5000 
5001     return ret;
5002 }
5003 #endif /* CONFIG_USBFS */
5004 
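/*
 * Device-mapper ioctls: a struct dm_ioctl header is followed by a
 * variable-sized payload whose layout depends on the command (target
 * specs, name lists, version lists, ...).  The header is converted via
 * the generic thunk code, while the payload is converted by hand in
 * both directions; DM_BUFFER_FULL_FLAG is raised if the converted data
 * would not fit into the guest-supplied buffer.
 */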
5005 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5006                             int cmd, abi_long arg)
5007 {
5008     void *argptr;
5009     struct dm_ioctl *host_dm;
5010     abi_long guest_data;
5011     uint32_t guest_data_size;
5012     int target_size;
5013     const argtype *arg_type = ie->arg_type;
5014     abi_long ret;
5015     void *big_buf = NULL;
5016     char *host_data;
5017 
5018     arg_type++;
5019     target_size = thunk_type_size(arg_type, 0);
5020     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5021     if (!argptr) {
5022         ret = -TARGET_EFAULT;
5023         goto out;
5024     }
5025     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5026     unlock_user(argptr, arg, 0);
5027 
5028     /* buf_temp is too small, so fetch things into a bigger buffer */
5029     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5030     memcpy(big_buf, buf_temp, target_size);
5031     buf_temp = big_buf;
5032     host_dm = big_buf;
5033 
5034     guest_data = arg + host_dm->data_start;
5035     if ((guest_data - arg) < 0) {
5036         ret = -TARGET_EINVAL;
5037         goto out;
5038     }
5039     guest_data_size = host_dm->data_size - host_dm->data_start;
5040     host_data = (char*)host_dm + host_dm->data_start;
5041 
5042     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5043     if (!argptr) {
5044         ret = -TARGET_EFAULT;
5045         goto out;
5046     }
5047 
5048     switch (ie->host_cmd) {
5049     case DM_REMOVE_ALL:
5050     case DM_LIST_DEVICES:
5051     case DM_DEV_CREATE:
5052     case DM_DEV_REMOVE:
5053     case DM_DEV_SUSPEND:
5054     case DM_DEV_STATUS:
5055     case DM_DEV_WAIT:
5056     case DM_TABLE_STATUS:
5057     case DM_TABLE_CLEAR:
5058     case DM_TABLE_DEPS:
5059     case DM_LIST_VERSIONS:
5060         /* no input data */
5061         break;
5062     case DM_DEV_RENAME:
5063     case DM_DEV_SET_GEOMETRY:
5064         /* data contains only strings */
5065         memcpy(host_data, argptr, guest_data_size);
5066         break;
5067     case DM_TARGET_MSG:
5068         memcpy(host_data, argptr, guest_data_size);
5069         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5070         break;
5071     case DM_TABLE_LOAD:
5072     {
5073         void *gspec = argptr;
5074         void *cur_data = host_data;
5075         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5076         int spec_size = thunk_type_size(dm_arg_type, 0);
5077         int i;
5078 
5079         for (i = 0; i < host_dm->target_count; i++) {
5080             struct dm_target_spec *spec = cur_data;
5081             uint32_t next;
5082             int slen;
5083 
5084             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5085             slen = strlen((char*)gspec + spec_size) + 1;
5086             next = spec->next;
5087             spec->next = sizeof(*spec) + slen;
5088             strcpy((char*)&spec[1], gspec + spec_size);
5089             gspec += next;
5090             cur_data += spec->next;
5091         }
5092         break;
5093     }
5094     default:
5095         ret = -TARGET_EINVAL;
5096         unlock_user(argptr, guest_data, 0);
5097         goto out;
5098     }
5099     unlock_user(argptr, guest_data, 0);
5100 
5101     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5102     if (!is_error(ret)) {
5103         guest_data = arg + host_dm->data_start;
5104         guest_data_size = host_dm->data_size - host_dm->data_start;
5105         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5106         switch (ie->host_cmd) {
5107         case DM_REMOVE_ALL:
5108         case DM_DEV_CREATE:
5109         case DM_DEV_REMOVE:
5110         case DM_DEV_RENAME:
5111         case DM_DEV_SUSPEND:
5112         case DM_DEV_STATUS:
5113         case DM_TABLE_LOAD:
5114         case DM_TABLE_CLEAR:
5115         case DM_TARGET_MSG:
5116         case DM_DEV_SET_GEOMETRY:
5117             /* no return data */
5118             break;
5119         case DM_LIST_DEVICES:
5120         {
5121             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5122             uint32_t remaining_data = guest_data_size;
5123             void *cur_data = argptr;
5124             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5125             int nl_size = 12; /* can't use thunk_size due to alignment */
5126 
5127             while (1) {
5128                 uint32_t next = nl->next;
5129                 if (next) {
5130                     nl->next = nl_size + (strlen(nl->name) + 1);
5131                 }
5132                 if (remaining_data < nl->next) {
5133                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5134                     break;
5135                 }
5136                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5137                 strcpy(cur_data + nl_size, nl->name);
5138                 cur_data += nl->next;
5139                 remaining_data -= nl->next;
5140                 if (!next) {
5141                     break;
5142                 }
5143                 nl = (void*)nl + next;
5144             }
5145             break;
5146         }
5147         case DM_DEV_WAIT:
5148         case DM_TABLE_STATUS:
5149         {
5150             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5151             void *cur_data = argptr;
5152             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5153             int spec_size = thunk_type_size(dm_arg_type, 0);
5154             int i;
5155 
5156             for (i = 0; i < host_dm->target_count; i++) {
5157                 uint32_t next = spec->next;
5158                 int slen = strlen((char*)&spec[1]) + 1;
5159                 spec->next = (cur_data - argptr) + spec_size + slen;
5160                 if (guest_data_size < spec->next) {
5161                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5162                     break;
5163                 }
5164                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5165                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5166                 cur_data = argptr + spec->next;
5167                 spec = (void*)host_dm + host_dm->data_start + next;
5168             }
5169             break;
5170         }
5171         case DM_TABLE_DEPS:
5172         {
5173             void *hdata = (void*)host_dm + host_dm->data_start;
5174             int count = *(uint32_t*)hdata;
5175             uint64_t *hdev = hdata + 8;
5176             uint64_t *gdev = argptr + 8;
5177             int i;
5178 
5179             *(uint32_t*)argptr = tswap32(count);
5180             for (i = 0; i < count; i++) {
5181                 *gdev = tswap64(*hdev);
5182                 gdev++;
5183                 hdev++;
5184             }
5185             break;
5186         }
5187         case DM_LIST_VERSIONS:
5188         {
5189             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5190             uint32_t remaining_data = guest_data_size;
5191             void *cur_data = argptr;
5192             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5193             int vers_size = thunk_type_size(dm_arg_type, 0);
5194 
5195             while (1) {
5196                 uint32_t next = vers->next;
5197                 if (next) {
5198                     vers->next = vers_size + (strlen(vers->name) + 1);
5199                 }
5200                 if (remaining_data < vers->next) {
5201                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5202                     break;
5203                 }
5204                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5205                 strcpy(cur_data + vers_size, vers->name);
5206                 cur_data += vers->next;
5207                 remaining_data -= vers->next;
5208                 if (!next) {
5209                     break;
5210                 }
5211                 vers = (void*)vers + next;
5212             }
5213             break;
5214         }
5215         default:
5216             unlock_user(argptr, guest_data, 0);
5217             ret = -TARGET_EINVAL;
5218             goto out;
5219         }
5220         unlock_user(argptr, guest_data, guest_data_size);
5221 
5222         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5223         if (!argptr) {
5224             ret = -TARGET_EFAULT;
5225             goto out;
5226         }
5227         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5228         unlock_user(argptr, arg, target_size);
5229     }
5230 out:
5231     g_free(big_buf);
5232     return ret;
5233 }
5234 
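/*
 * BLKPG: the struct blkpg_ioctl_arg carries an opcode and a pointer to
 * a struct blkpg_partition.  Both levels are converted to host format,
 * the data pointer is redirected to the local copy, and the ioctl is
 * issued with that.
 */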
5235 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5236                                int cmd, abi_long arg)
5237 {
5238     void *argptr;
5239     int target_size;
5240     const argtype *arg_type = ie->arg_type;
5241     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5242     abi_long ret;
5243 
5244     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5245     struct blkpg_partition host_part;
5246 
5247     /* Read and convert blkpg */
5248     arg_type++;
5249     target_size = thunk_type_size(arg_type, 0);
5250     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5251     if (!argptr) {
5252         ret = -TARGET_EFAULT;
5253         goto out;
5254     }
5255     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5256     unlock_user(argptr, arg, 0);
5257 
5258     switch (host_blkpg->op) {
5259     case BLKPG_ADD_PARTITION:
5260     case BLKPG_DEL_PARTITION:
5261         /* payload is struct blkpg_partition */
5262         break;
5263     default:
5264         /* Unknown opcode */
5265         ret = -TARGET_EINVAL;
5266         goto out;
5267     }
5268 
5269     /* Read and convert blkpg->data */
5270     arg = (abi_long)(uintptr_t)host_blkpg->data;
5271     target_size = thunk_type_size(part_arg_type, 0);
5272     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5273     if (!argptr) {
5274         ret = -TARGET_EFAULT;
5275         goto out;
5276     }
5277     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5278     unlock_user(argptr, arg, 0);
5279 
5280     /* Swizzle the data pointer to our local copy and call! */
5281     host_blkpg->data = &host_part;
5282     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5283 
5284 out:
5285     return ret;
5286 }
5287 
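/*
 * SIOCADDRT/SIOCDELRT-style requests: struct rtentry is converted field
 * by field so that the rt_dev pointer can be intercepted and the device
 * name string locked in from guest memory instead of being converted as
 * an opaque pointer.
 */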
5288 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5289                                 int fd, int cmd, abi_long arg)
5290 {
5291     const argtype *arg_type = ie->arg_type;
5292     const StructEntry *se;
5293     const argtype *field_types;
5294     const int *dst_offsets, *src_offsets;
5295     int target_size;
5296     void *argptr;
5297     abi_ulong *target_rt_dev_ptr = NULL;
5298     unsigned long *host_rt_dev_ptr = NULL;
5299     abi_long ret;
5300     int i;
5301 
5302     assert(ie->access == IOC_W);
5303     assert(*arg_type == TYPE_PTR);
5304     arg_type++;
5305     assert(*arg_type == TYPE_STRUCT);
5306     target_size = thunk_type_size(arg_type, 0);
5307     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5308     if (!argptr) {
5309         return -TARGET_EFAULT;
5310     }
5311     arg_type++;
5312     assert(*arg_type == (int)STRUCT_rtentry);
5313     se = struct_entries + *arg_type++;
5314     assert(se->convert[0] == NULL);
5315     /* convert struct here to be able to catch rt_dev string */
5316     field_types = se->field_types;
5317     dst_offsets = se->field_offsets[THUNK_HOST];
5318     src_offsets = se->field_offsets[THUNK_TARGET];
5319     for (i = 0; i < se->nb_fields; i++) {
5320         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5321             assert(*field_types == TYPE_PTRVOID);
5322             target_rt_dev_ptr = argptr + src_offsets[i];
5323             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5324             if (*target_rt_dev_ptr != 0) {
5325                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5326                                                   tswapal(*target_rt_dev_ptr));
5327                 if (!*host_rt_dev_ptr) {
5328                     unlock_user(argptr, arg, 0);
5329                     return -TARGET_EFAULT;
5330                 }
5331             } else {
5332                 *host_rt_dev_ptr = 0;
5333             }
5334             field_types++;
5335             continue;
5336         }
5337         field_types = thunk_convert(buf_temp + dst_offsets[i],
5338                                     argptr + src_offsets[i],
5339                                     field_types, THUNK_HOST);
5340     }
5341     unlock_user(argptr, arg, 0);
5342 
5343     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5344 
5345     assert(host_rt_dev_ptr != NULL);
5346     assert(target_rt_dev_ptr != NULL);
5347     if (*host_rt_dev_ptr != 0) {
5348         unlock_user((void *)*host_rt_dev_ptr,
5349                     *target_rt_dev_ptr, 0);
5350     }
5351     return ret;
5352 }
5353 
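/* KDSIGACCEPT takes a signal number that must be mapped to host numbering. */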
5354 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5355                                      int fd, int cmd, abi_long arg)
5356 {
5357     int sig = target_to_host_signal(arg);
5358     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5359 }
5360 
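/*
 * SIOCGSTAMP/SIOCGSTAMPNS: always perform the host ioctl with a native
 * timeval/timespec, then copy the result out in either the traditional
 * layout or the 64-bit time layout, depending on whether the guest used
 * the _OLD or the new variant of the target command.
 */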
5361 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5362                                     int fd, int cmd, abi_long arg)
5363 {
5364     struct timeval tv;
5365     abi_long ret;
5366 
5367     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5368     if (is_error(ret)) {
5369         return ret;
5370     }
5371 
5372     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5373         if (copy_to_user_timeval(arg, &tv)) {
5374             return -TARGET_EFAULT;
5375         }
5376     } else {
5377         if (copy_to_user_timeval64(arg, &tv)) {
5378             return -TARGET_EFAULT;
5379         }
5380     }
5381 
5382     return ret;
5383 }
5384 
5385 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5386                                       int fd, int cmd, abi_long arg)
5387 {
5388     struct timespec ts;
5389     abi_long ret;
5390 
5391     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5392     if (is_error(ret)) {
5393         return ret;
5394     }
5395 
5396     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5397         if (host_to_target_timespec(arg, &ts)) {
5398             return -TARGET_EFAULT;
5399         }
5400     } else {
5401         if (host_to_target_timespec64(arg, &ts)) {
5402             return -TARGET_EFAULT;
5403         }
5404     }
5405 
5406     return ret;
5407 }
5408 
5409 #ifdef TIOCGPTPEER
5410 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5411                                      int fd, int cmd, abi_long arg)
5412 {
5413     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5414     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5415 }
5416 #endif
5417 
5418 #ifdef HAVE_DRM_H
5419 
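/*
 * DRM_IOCTL_VERSION: the guest supplies buffers (and their lengths) for
 * the driver name, date and description strings.  Those guest buffers
 * are locked and handed to the host ioctl directly; afterwards the
 * version numbers and string lengths are copied back to the guest.
 */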
5420 static void unlock_drm_version(struct drm_version *host_ver,
5421                                struct target_drm_version *target_ver,
5422                                bool copy)
5423 {
5424     unlock_user(host_ver->name, target_ver->name,
5425                                 copy ? host_ver->name_len : 0);
5426     unlock_user(host_ver->date, target_ver->date,
5427                                 copy ? host_ver->date_len : 0);
5428     unlock_user(host_ver->desc, target_ver->desc,
5429                                 copy ? host_ver->desc_len : 0);
5430 }
5431 
5432 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5433                                           struct target_drm_version *target_ver)
5434 {
5435     memset(host_ver, 0, sizeof(*host_ver));
5436 
5437     __get_user(host_ver->name_len, &target_ver->name_len);
5438     if (host_ver->name_len) {
5439         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5440                                    target_ver->name_len, 0);
5441         if (!host_ver->name) {
5442             return -EFAULT;
5443         }
5444     }
5445 
5446     __get_user(host_ver->date_len, &target_ver->date_len);
5447     if (host_ver->date_len) {
5448         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5449                                    target_ver->date_len, 0);
5450         if (!host_ver->date) {
5451             goto err;
5452         }
5453     }
5454 
5455     __get_user(host_ver->desc_len, &target_ver->desc_len);
5456     if (host_ver->desc_len) {
5457         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5458                                    target_ver->desc_len, 0);
5459         if (!host_ver->desc) {
5460             goto err;
5461         }
5462     }
5463 
5464     return 0;
5465 err:
5466     unlock_drm_version(host_ver, target_ver, false);
5467     return -EFAULT;
5468 }
5469 
5470 static inline void host_to_target_drmversion(
5471                                           struct target_drm_version *target_ver,
5472                                           struct drm_version *host_ver)
5473 {
5474     __put_user(host_ver->version_major, &target_ver->version_major);
5475     __put_user(host_ver->version_minor, &target_ver->version_minor);
5476     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5477     __put_user(host_ver->name_len, &target_ver->name_len);
5478     __put_user(host_ver->date_len, &target_ver->date_len);
5479     __put_user(host_ver->desc_len, &target_ver->desc_len);
5480     unlock_drm_version(host_ver, target_ver, true);
5481 }
5482 
5483 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5484                              int fd, int cmd, abi_long arg)
5485 {
5486     struct drm_version *ver;
5487     struct target_drm_version *target_ver;
5488     abi_long ret;
5489 
5490     switch (ie->host_cmd) {
5491     case DRM_IOCTL_VERSION:
5492         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5493             return -TARGET_EFAULT;
5494         }
5495         ver = (struct drm_version *)buf_temp;
5496         ret = target_to_host_drmversion(ver, target_ver);
5497         if (!is_error(ret)) {
5498             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5499             if (is_error(ret)) {
5500                 unlock_drm_version(ver, target_ver, false);
5501             } else {
5502                 host_to_target_drmversion(target_ver, ver);
5503             }
5504         }
5505         unlock_user_struct(target_ver, arg, 0);
5506         return ret;
5507     }
5508     return -TARGET_ENOSYS;
5509 }
5510 
5511 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5512                                            struct drm_i915_getparam *gparam,
5513                                            int fd, abi_long arg)
5514 {
5515     abi_long ret;
5516     int value;
5517     struct target_drm_i915_getparam *target_gparam;
5518 
5519     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5520         return -TARGET_EFAULT;
5521     }
5522 
5523     __get_user(gparam->param, &target_gparam->param);
5524     gparam->value = &value;
5525     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5526     put_user_s32(value, target_gparam->value);
5527 
5528     unlock_user_struct(target_gparam, arg, 0);
5529     return ret;
5530 }
5531 
5532 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5533                                   int fd, int cmd, abi_long arg)
5534 {
5535     switch (ie->host_cmd) {
5536     case DRM_IOCTL_I915_GETPARAM:
5537         return do_ioctl_drm_i915_getparam(ie,
5538                                           (struct drm_i915_getparam *)buf_temp,
5539                                           fd, arg);
5540     default:
5541         return -TARGET_ENOSYS;
5542     }
5543 }
5544 
5545 #endif
5546 
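/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable number of
 * ETH_ALEN-byte MAC addresses, so the header and the address array are
 * copied in separately, with the total size bounded by MAX_STRUCT_SIZE.
 */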
5547 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5548                                         int fd, int cmd, abi_long arg)
5549 {
5550     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5551     struct tun_filter *target_filter;
5552     char *target_addr;
5553 
5554     assert(ie->access == IOC_W);
5555 
5556     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5557     if (!target_filter) {
5558         return -TARGET_EFAULT;
5559     }
5560     filter->flags = tswap16(target_filter->flags);
5561     filter->count = tswap16(target_filter->count);
5562     unlock_user(target_filter, arg, 0);
5563 
5564     if (filter->count) {
5565         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5566             MAX_STRUCT_SIZE) {
5567             return -TARGET_EFAULT;
5568         }
5569 
5570         target_addr = lock_user(VERIFY_READ,
5571                                 arg + offsetof(struct tun_filter, addr),
5572                                 filter->count * ETH_ALEN, 1);
5573         if (!target_addr) {
5574             return -TARGET_EFAULT;
5575         }
5576         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5577         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5578     }
5579 
5580     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5581 }
5582 
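/*
 * The ioctl table is generated from ioctls.h.  As an illustrative,
 * hypothetical example, an entry written there as
 *     IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * expands to
 *     { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *       { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * while IOCTL_SPECIAL additionally names a do_ioctl_* helper and
 * IOCTL_IGNORE leaves host_cmd as 0 so do_ioctl() returns -TARGET_ENOTTY.
 */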
5583 IOCTLEntry ioctl_entries[] = {
5584 #define IOCTL(cmd, access, ...) \
5585     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5586 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5587     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5588 #define IOCTL_IGNORE(cmd) \
5589     { TARGET_ ## cmd, 0, #cmd },
5590 #include "ioctls.h"
5591     { 0, 0, },
5592 };
5593 
5594 /* ??? Implement proper locking for ioctls.  */
5595 /* do_ioctl() must return target values and target errnos. */
5596 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5597 {
5598     const IOCTLEntry *ie;
5599     const argtype *arg_type;
5600     abi_long ret;
5601     uint8_t buf_temp[MAX_STRUCT_SIZE];
5602     int target_size;
5603     void *argptr;
5604 
5605     ie = ioctl_entries;
5606     for (;;) {
5607         if (ie->target_cmd == 0) {
5608             qemu_log_mask(
5609                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5610             return -TARGET_ENOTTY;
5611         }
5612         if (ie->target_cmd == cmd)
5613             break;
5614         ie++;
5615     }
5616     arg_type = ie->arg_type;
5617     if (ie->do_ioctl) {
5618         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5619     } else if (!ie->host_cmd) {
5620         /* Some architectures define BSD ioctls in their headers
5621            that are not implemented in Linux.  */
5622         return -TARGET_ENOTTY;
5623     }
5624 
5625     switch (arg_type[0]) {
5626     case TYPE_NULL:
5627         /* no argument */
5628         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5629         break;
5630     case TYPE_PTRVOID:
5631     case TYPE_INT:
5632     case TYPE_LONG:
5633     case TYPE_ULONG:
5634         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5635         break;
5636     case TYPE_PTR:
5637         arg_type++;
5638         target_size = thunk_type_size(arg_type, 0);
5639         switch (ie->access) {
5640         case IOC_R:
5641             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5642             if (!is_error(ret)) {
5643                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5644                 if (!argptr)
5645                     return -TARGET_EFAULT;
5646                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5647                 unlock_user(argptr, arg, target_size);
5648             }
5649             break;
5650         case IOC_W:
5651             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5652             if (!argptr)
5653                 return -TARGET_EFAULT;
5654             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5655             unlock_user(argptr, arg, 0);
5656             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5657             break;
5658         default:
5659         case IOC_RW:
5660             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5661             if (!argptr)
5662                 return -TARGET_EFAULT;
5663             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5664             unlock_user(argptr, arg, 0);
5665             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5666             if (!is_error(ret)) {
5667                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5668                 if (!argptr)
5669                     return -TARGET_EFAULT;
5670                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5671                 unlock_user(argptr, arg, target_size);
5672             }
5673             break;
5674         }
5675         break;
5676     default:
5677         qemu_log_mask(LOG_UNIMP,
5678                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5679                       (long)cmd, arg_type[0]);
5680         ret = -TARGET_ENOTTY;
5681         break;
5682     }
5683     return ret;
5684 }
5685 
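/*
 * Terminal flag translation tables.  Each bitmask_transtbl entry below
 * reads as { target_mask, target_bits, host_mask, host_bits }: when the
 * target value masked with target_mask equals target_bits, host_bits is
 * set in the host result (and symmetrically for the reverse direction).
 */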
5686 static const bitmask_transtbl iflag_tbl[] = {
5687         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5688         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5689         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5690         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5691         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5692         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5693         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5694         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5695         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5696         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5697         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5698         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5699         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5700         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5701         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5702 };
5703 
5704 static const bitmask_transtbl oflag_tbl[] = {
5705 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5706 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5707 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5708 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5709 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5710 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5711 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5712 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5713 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5714 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5715 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5716 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5717 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5718 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5719 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5720 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5721 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5722 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5723 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5724 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5725 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5726 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5727 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5728 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5729 };
5730 
5731 static const bitmask_transtbl cflag_tbl[] = {
5732 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5733 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5734 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5735 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5736 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5737 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5738 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5739 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5740 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5741 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5742 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5743 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5744 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5745 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5746 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5747 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5748 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5749 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5750 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5751 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5752 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5753 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5754 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5755 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5756 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5757 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5758 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5759 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5760 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5761 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5762 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5763 };
5764 
5765 static const bitmask_transtbl lflag_tbl[] = {
5766   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5767   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5768   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5769   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5770   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5771   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5772   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5773   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5774   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5775   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5776   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5777   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5778   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5779   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5780   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5781   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5782 };
5783 
5784 static void target_to_host_termios (void *dst, const void *src)
5785 {
5786     struct host_termios *host = dst;
5787     const struct target_termios *target = src;
5788 
5789     host->c_iflag =
5790         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5791     host->c_oflag =
5792         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5793     host->c_cflag =
5794         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5795     host->c_lflag =
5796         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5797     host->c_line = target->c_line;
5798 
5799     memset(host->c_cc, 0, sizeof(host->c_cc));
5800     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5801     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5802     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5803     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5804     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5805     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5806     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5807     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5808     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5809     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5810     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5811     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5812     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5813     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5814     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5815     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5816     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5817 }
5818 
5819 static void host_to_target_termios (void *dst, const void *src)
5820 {
5821     struct target_termios *target = dst;
5822     const struct host_termios *host = src;
5823 
5824     target->c_iflag =
5825         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5826     target->c_oflag =
5827         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5828     target->c_cflag =
5829         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5830     target->c_lflag =
5831         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5832     target->c_line = host->c_line;
5833 
5834     memset(target->c_cc, 0, sizeof(target->c_cc));
5835     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5836     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5837     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5838     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5839     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5840     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5841     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5842     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5843     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5844     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5845     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5846     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5847     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5848     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5849     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5850     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5851     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5852 }
5853 
5854 static const StructEntry struct_termios_def = {
5855     .convert = { host_to_target_termios, target_to_host_termios },
5856     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5857     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5858     .print = print_termios,
5859 };
5860 
5861 /* If the host does not provide these bits, they may be safely discarded. */
5862 #ifndef MAP_SYNC
5863 #define MAP_SYNC 0
5864 #endif
5865 #ifndef MAP_UNINITIALIZED
5866 #define MAP_UNINITIALIZED 0
5867 #endif
5868 
5869 static const bitmask_transtbl mmap_flags_tbl[] = {
5870     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5871     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5872       MAP_ANONYMOUS, MAP_ANONYMOUS },
5873     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5874       MAP_GROWSDOWN, MAP_GROWSDOWN },
5875     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5876       MAP_DENYWRITE, MAP_DENYWRITE },
5877     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5878       MAP_EXECUTABLE, MAP_EXECUTABLE },
5879     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5880     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5881       MAP_NORESERVE, MAP_NORESERVE },
5882     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5883     /* MAP_STACK had been ignored by the kernel for quite some time.
5884        Recognize it for the target insofar as we do not want to pass
5885        it through to the host.  */
5886     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5887     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5888     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5889     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5890       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5891     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5892       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5893 };
5894 
5895 /*
5896  * Arrange for legacy / undefined architecture specific flags to be
5897  * ignored by mmap handling code.
5898  */
5899 #ifndef TARGET_MAP_32BIT
5900 #define TARGET_MAP_32BIT 0
5901 #endif
5902 #ifndef TARGET_MAP_HUGE_2MB
5903 #define TARGET_MAP_HUGE_2MB 0
5904 #endif
5905 #ifndef TARGET_MAP_HUGE_1GB
5906 #define TARGET_MAP_HUGE_1GB 0
5907 #endif
5908 
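/*
 * Translate target mmap flags to host flags and call target_mmap().
 * The map type is checked explicitly; for MAP_SHARED_VALIDATE, flags
 * outside the historical set (plus MAP_SYNC) are rejected with
 * -TARGET_EOPNOTSUPP.
 */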
5909 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5910                         int target_flags, int fd, off_t offset)
5911 {
5912     /*
5913      * The historical set of flags that all mmap types implicitly support.
5914      */
5915     enum {
5916         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5917                                | TARGET_MAP_PRIVATE
5918                                | TARGET_MAP_FIXED
5919                                | TARGET_MAP_ANONYMOUS
5920                                | TARGET_MAP_DENYWRITE
5921                                | TARGET_MAP_EXECUTABLE
5922                                | TARGET_MAP_UNINITIALIZED
5923                                | TARGET_MAP_GROWSDOWN
5924                                | TARGET_MAP_LOCKED
5925                                | TARGET_MAP_NORESERVE
5926                                | TARGET_MAP_POPULATE
5927                                | TARGET_MAP_NONBLOCK
5928                                | TARGET_MAP_STACK
5929                                | TARGET_MAP_HUGETLB
5930                                | TARGET_MAP_32BIT
5931                                | TARGET_MAP_HUGE_2MB
5932                                | TARGET_MAP_HUGE_1GB
5933     };
5934     int host_flags;
5935 
5936     switch (target_flags & TARGET_MAP_TYPE) {
5937     case TARGET_MAP_PRIVATE:
5938         host_flags = MAP_PRIVATE;
5939         break;
5940     case TARGET_MAP_SHARED:
5941         host_flags = MAP_SHARED;
5942         break;
5943     case TARGET_MAP_SHARED_VALIDATE:
5944         /*
5945          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5946          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5947          */
5948         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5949             return -TARGET_EOPNOTSUPP;
5950         }
5951         host_flags = MAP_SHARED_VALIDATE;
5952         if (target_flags & TARGET_MAP_SYNC) {
5953             host_flags |= MAP_SYNC;
5954         }
5955         break;
5956     default:
5957         return -TARGET_EINVAL;
5958     }
5959     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5960 
5961     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5962 }
5963 
5964 /*
5965  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64).
5966  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
5967  */
5968 #if defined(TARGET_I386)
5969 
5970 /* NOTE: there is really only one LDT, shared by all threads */
5971 static uint8_t *ldt_table;
5972 
5973 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5974 {
5975     int size;
5976     void *p;
5977 
5978     if (!ldt_table)
5979         return 0;
5980     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5981     if (size > bytecount)
5982         size = bytecount;
5983     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5984     if (!p)
5985         return -TARGET_EFAULT;
5986     /* ??? Should this be byteswapped?  */
5987     memcpy(p, ldt_table, size);
5988     unlock_user(p, ptr, size);
5989     return size;
5990 }
5991 
5992 /* XXX: add locking support */
5993 static abi_long write_ldt(CPUX86State *env,
5994                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5995 {
5996     struct target_modify_ldt_ldt_s ldt_info;
5997     struct target_modify_ldt_ldt_s *target_ldt_info;
5998     int seg_32bit, contents, read_exec_only, limit_in_pages;
5999     int seg_not_present, useable, lm;
6000     uint32_t *lp, entry_1, entry_2;
6001 
6002     if (bytecount != sizeof(ldt_info))
6003         return -TARGET_EINVAL;
6004     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6005         return -TARGET_EFAULT;
6006     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6007     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6008     ldt_info.limit = tswap32(target_ldt_info->limit);
6009     ldt_info.flags = tswap32(target_ldt_info->flags);
6010     unlock_user_struct(target_ldt_info, ptr, 0);
6011 
6012     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6013         return -TARGET_EINVAL;
6014     seg_32bit = ldt_info.flags & 1;
6015     contents = (ldt_info.flags >> 1) & 3;
6016     read_exec_only = (ldt_info.flags >> 3) & 1;
6017     limit_in_pages = (ldt_info.flags >> 4) & 1;
6018     seg_not_present = (ldt_info.flags >> 5) & 1;
6019     useable = (ldt_info.flags >> 6) & 1;
6020 #ifdef TARGET_ABI32
6021     lm = 0;
6022 #else
6023     lm = (ldt_info.flags >> 7) & 1;
6024 #endif
6025     if (contents == 3) {
6026         if (oldmode)
6027             return -TARGET_EINVAL;
6028         if (seg_not_present == 0)
6029             return -TARGET_EINVAL;
6030     }
6031     /* allocate the LDT */
6032     if (!ldt_table) {
6033         env->ldt.base = target_mmap(0,
6034                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6035                                     PROT_READ|PROT_WRITE,
6036                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6037         if (env->ldt.base == -1)
6038             return -TARGET_ENOMEM;
6039         memset(g2h_untagged(env->ldt.base), 0,
6040                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6041         env->ldt.limit = 0xffff;
6042         ldt_table = g2h_untagged(env->ldt.base);
6043     }
6044 
6045     /* NOTE: same code as Linux kernel */
6046     /* Allow LDTs to be cleared by the user. */
6047     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6048         if (oldmode ||
6049             (contents == 0             &&
6050              read_exec_only == 1       &&
6051              seg_32bit == 0            &&
6052              limit_in_pages == 0       &&
6053              seg_not_present == 1      &&
6054              useable == 0 )) {
6055             entry_1 = 0;
6056             entry_2 = 0;
6057             goto install;
6058         }
6059     }
6060 
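    /*
     * Build the two 32-bit halves of an x86 segment descriptor:
     * entry_1 carries base[15:0] and limit[15:0], entry_2 carries the
     * high base and limit bits plus the access/flag bits derived from
     * ldt_info.flags above.
     */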
6061     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6062         (ldt_info.limit & 0x0ffff);
6063     entry_2 = (ldt_info.base_addr & 0xff000000) |
6064         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6065         (ldt_info.limit & 0xf0000) |
6066         ((read_exec_only ^ 1) << 9) |
6067         (contents << 10) |
6068         ((seg_not_present ^ 1) << 15) |
6069         (seg_32bit << 22) |
6070         (limit_in_pages << 23) |
6071         (lm << 21) |
6072         0x7000;
6073     if (!oldmode)
6074         entry_2 |= (useable << 20);
6075 
6076     /* Install the new entry ...  */
6077 install:
6078     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6079     lp[0] = tswap32(entry_1);
6080     lp[1] = tswap32(entry_2);
6081     return 0;
6082 }
6083 
6084 /* specific and weird i386 syscalls */
6085 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6086                               unsigned long bytecount)
6087 {
6088     abi_long ret;
6089 
6090     switch (func) {
6091     case 0:
6092         ret = read_ldt(ptr, bytecount);
6093         break;
6094     case 1:
6095         ret = write_ldt(env, ptr, bytecount, 1);
6096         break;
6097     case 0x11:
6098         ret = write_ldt(env, ptr, bytecount, 0);
6099         break;
6100     default:
6101         ret = -TARGET_ENOSYS;
6102         break;
6103     }
6104     return ret;
6105 }
6106 
6107 #if defined(TARGET_ABI32)
6108 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6109 {
6110     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6111     struct target_modify_ldt_ldt_s ldt_info;
6112     struct target_modify_ldt_ldt_s *target_ldt_info;
6113     int seg_32bit, contents, read_exec_only, limit_in_pages;
6114     int seg_not_present, useable, lm;
6115     uint32_t *lp, entry_1, entry_2;
6116     int i;
6117 
6118     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6119     if (!target_ldt_info)
6120         return -TARGET_EFAULT;
6121     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6122     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6123     ldt_info.limit = tswap32(target_ldt_info->limit);
6124     ldt_info.flags = tswap32(target_ldt_info->flags);
6125     if (ldt_info.entry_number == -1) {
6126         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6127             if (gdt_table[i] == 0) {
6128                 ldt_info.entry_number = i;
6129                 target_ldt_info->entry_number = tswap32(i);
6130                 break;
6131             }
6132         }
6133     }
6134     unlock_user_struct(target_ldt_info, ptr, 1);
6135 
6136     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6137         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6138         return -TARGET_EINVAL;
6139     seg_32bit = ldt_info.flags & 1;
6140     contents = (ldt_info.flags >> 1) & 3;
6141     read_exec_only = (ldt_info.flags >> 3) & 1;
6142     limit_in_pages = (ldt_info.flags >> 4) & 1;
6143     seg_not_present = (ldt_info.flags >> 5) & 1;
6144     useable = (ldt_info.flags >> 6) & 1;
6145 #ifdef TARGET_ABI32
6146     lm = 0;
6147 #else
6148     lm = (ldt_info.flags >> 7) & 1;
6149 #endif
6150 
6151     if (contents == 3) {
6152         if (seg_not_present == 0)
6153             return -TARGET_EINVAL;
6154     }
6155 
6156     /* NOTE: same code as Linux kernel */
6157     /* Allow LDTs to be cleared by the user. */
6158     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6159         if ((contents == 0             &&
6160              read_exec_only == 1       &&
6161              seg_32bit == 0            &&
6162              limit_in_pages == 0       &&
6163              seg_not_present == 1      &&
6164              useable == 0 )) {
6165             entry_1 = 0;
6166             entry_2 = 0;
6167             goto install;
6168         }
6169     }
6170 
6171     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6172         (ldt_info.limit & 0x0ffff);
6173     entry_2 = (ldt_info.base_addr & 0xff000000) |
6174         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6175         (ldt_info.limit & 0xf0000) |
6176         ((read_exec_only ^ 1) << 9) |
6177         (contents << 10) |
6178         ((seg_not_present ^ 1) << 15) |
6179         (seg_32bit << 22) |
6180         (limit_in_pages << 23) |
6181         (useable << 20) |
6182         (lm << 21) |
6183         0x7000;
6184 
6185     /* Install the new entry ...  */
6186 install:
6187     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6188     lp[0] = tswap32(entry_1);
6189     lp[1] = tswap32(entry_2);
6190     return 0;
6191 }
6192 
6193 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6194 {
6195     struct target_modify_ldt_ldt_s *target_ldt_info;
6196     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6197     uint32_t base_addr, limit, flags;
6198     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6199     int seg_not_present, useable, lm;
6200     uint32_t *lp, entry_1, entry_2;
6201 
6202     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6203     if (!target_ldt_info)
6204         return -TARGET_EFAULT;
6205     idx = tswap32(target_ldt_info->entry_number);
6206     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6207         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6208         unlock_user_struct(target_ldt_info, ptr, 1);
6209         return -TARGET_EINVAL;
6210     }
6211     lp = (uint32_t *)(gdt_table + idx);
6212     entry_1 = tswap32(lp[0]);
6213     entry_2 = tswap32(lp[1]);
6214 
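    /* Decode the descriptor's second word back into user_desc flag bits. */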
6215     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6216     contents = (entry_2 >> 10) & 3;
6217     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6218     seg_32bit = (entry_2 >> 22) & 1;
6219     limit_in_pages = (entry_2 >> 23) & 1;
6220     useable = (entry_2 >> 20) & 1;
6221 #ifdef TARGET_ABI32
6222     lm = 0;
6223 #else
6224     lm = (entry_2 >> 21) & 1;
6225 #endif
6226     flags = (seg_32bit << 0) | (contents << 1) |
6227         (read_exec_only << 3) | (limit_in_pages << 4) |
6228         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6229     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6230     base_addr = (entry_1 >> 16) |
6231         (entry_2 & 0xff000000) |
6232         ((entry_2 & 0xff) << 16);
6233     target_ldt_info->base_addr = tswapal(base_addr);
6234     target_ldt_info->limit = tswap32(limit);
6235     target_ldt_info->flags = tswap32(flags);
6236     unlock_user_struct(target_ldt_info, ptr, 1);
6237     return 0;
6238 }
6239 
6240 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6241 {
6242     return -TARGET_ENOSYS;
6243 }
6244 #else
6245 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6246 {
6247     abi_long ret = 0;
6248     abi_ulong val;
6249     int idx;
6250 
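    /*
     * arch_prctl on x86-64: set or read the FS/GS segment base.  For the
     * SET cases we load a null selector and then override the cached base.
     */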
6251     switch(code) {
6252     case TARGET_ARCH_SET_GS:
6253     case TARGET_ARCH_SET_FS:
6254         if (code == TARGET_ARCH_SET_GS)
6255             idx = R_GS;
6256         else
6257             idx = R_FS;
6258         cpu_x86_load_seg(env, idx, 0);
6259         env->segs[idx].base = addr;
6260         break;
6261     case TARGET_ARCH_GET_GS:
6262     case TARGET_ARCH_GET_FS:
6263         if (code == TARGET_ARCH_GET_GS)
6264             idx = R_GS;
6265         else
6266             idx = R_FS;
6267         val = env->segs[idx].base;
6268         if (put_user(val, addr, abi_ulong))
6269             ret = -TARGET_EFAULT;
6270         break;
6271     default:
6272         ret = -TARGET_EINVAL;
6273         break;
6274     }
6275     return ret;
6276 }
6277 #endif /* defined(TARGET_ABI32) */
6278 #endif /* defined(TARGET_I386) */
6279 
6280 /*
6281  * These constants are generic.  Supply any that are missing from the host.
6282  */
6283 #ifndef PR_SET_NAME
6284 # define PR_SET_NAME    15
6285 # define PR_GET_NAME    16
6286 #endif
6287 #ifndef PR_SET_FP_MODE
6288 # define PR_SET_FP_MODE 45
6289 # define PR_GET_FP_MODE 46
6290 # define PR_FP_MODE_FR   (1 << 0)
6291 # define PR_FP_MODE_FRE  (1 << 1)
6292 #endif
6293 #ifndef PR_SVE_SET_VL
6294 # define PR_SVE_SET_VL  50
6295 # define PR_SVE_GET_VL  51
6296 # define PR_SVE_VL_LEN_MASK  0xffff
6297 # define PR_SVE_VL_INHERIT   (1 << 17)
6298 #endif
6299 #ifndef PR_PAC_RESET_KEYS
6300 # define PR_PAC_RESET_KEYS  54
6301 # define PR_PAC_APIAKEY   (1 << 0)
6302 # define PR_PAC_APIBKEY   (1 << 1)
6303 # define PR_PAC_APDAKEY   (1 << 2)
6304 # define PR_PAC_APDBKEY   (1 << 3)
6305 # define PR_PAC_APGAKEY   (1 << 4)
6306 #endif
6307 #ifndef PR_SET_TAGGED_ADDR_CTRL
6308 # define PR_SET_TAGGED_ADDR_CTRL 55
6309 # define PR_GET_TAGGED_ADDR_CTRL 56
6310 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6311 #endif
6312 #ifndef PR_SET_IO_FLUSHER
6313 # define PR_SET_IO_FLUSHER 57
6314 # define PR_GET_IO_FLUSHER 58
6315 #endif
6316 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6317 # define PR_SET_SYSCALL_USER_DISPATCH 59
6318 #endif
6319 #ifndef PR_SME_SET_VL
6320 # define PR_SME_SET_VL  63
6321 # define PR_SME_GET_VL  64
6322 # define PR_SME_VL_LEN_MASK  0xffff
6323 # define PR_SME_VL_INHERIT   (1 << 17)
6324 #endif
6325 
6326 #include "target_prctl.h"
6327 
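/*
 * Default handlers for the per-target prctl hooks: any hook that
 * target_prctl.h does not provide falls back to one of these stubs,
 * which simply fail with -TARGET_EINVAL.
 */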
6328 static abi_long do_prctl_inval0(CPUArchState *env)
6329 {
6330     return -TARGET_EINVAL;
6331 }
6332 
6333 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6334 {
6335     return -TARGET_EINVAL;
6336 }
6337 
6338 #ifndef do_prctl_get_fp_mode
6339 #define do_prctl_get_fp_mode do_prctl_inval0
6340 #endif
6341 #ifndef do_prctl_set_fp_mode
6342 #define do_prctl_set_fp_mode do_prctl_inval1
6343 #endif
6344 #ifndef do_prctl_sve_get_vl
6345 #define do_prctl_sve_get_vl do_prctl_inval0
6346 #endif
6347 #ifndef do_prctl_sve_set_vl
6348 #define do_prctl_sve_set_vl do_prctl_inval1
6349 #endif
6350 #ifndef do_prctl_reset_keys
6351 #define do_prctl_reset_keys do_prctl_inval1
6352 #endif
6353 #ifndef do_prctl_set_tagged_addr_ctrl
6354 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6355 #endif
6356 #ifndef do_prctl_get_tagged_addr_ctrl
6357 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6358 #endif
6359 #ifndef do_prctl_get_unalign
6360 #define do_prctl_get_unalign do_prctl_inval1
6361 #endif
6362 #ifndef do_prctl_set_unalign
6363 #define do_prctl_set_unalign do_prctl_inval1
6364 #endif
6365 #ifndef do_prctl_sme_get_vl
6366 #define do_prctl_sme_get_vl do_prctl_inval0
6367 #endif
6368 #ifndef do_prctl_sme_set_vl
6369 #define do_prctl_sme_set_vl do_prctl_inval1
6370 #endif
6371 
6372 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6373                          abi_long arg3, abi_long arg4, abi_long arg5)
6374 {
6375     abi_long ret;
6376 
6377     switch (option) {
6378     case PR_GET_PDEATHSIG:
6379         {
6380             int deathsig;
6381             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6382                                   arg3, arg4, arg5));
6383             if (!is_error(ret) &&
6384                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6385                 return -TARGET_EFAULT;
6386             }
6387             return ret;
6388         }
6389     case PR_SET_PDEATHSIG:
6390         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6391                                arg3, arg4, arg5));
6392     case PR_GET_NAME:
6393         {
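            /* The task comm name is at most 16 bytes, including the trailing NUL. */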
6394             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6395             if (!name) {
6396                 return -TARGET_EFAULT;
6397             }
6398             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6399                                   arg3, arg4, arg5));
6400             unlock_user(name, arg2, 16);
6401             return ret;
6402         }
6403     case PR_SET_NAME:
6404         {
6405             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6406             if (!name) {
6407                 return -TARGET_EFAULT;
6408             }
6409             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6410                                   arg3, arg4, arg5));
6411             unlock_user(name, arg2, 0);
6412             return ret;
6413         }
6414     case PR_GET_FP_MODE:
6415         return do_prctl_get_fp_mode(env);
6416     case PR_SET_FP_MODE:
6417         return do_prctl_set_fp_mode(env, arg2);
6418     case PR_SVE_GET_VL:
6419         return do_prctl_sve_get_vl(env);
6420     case PR_SVE_SET_VL:
6421         return do_prctl_sve_set_vl(env, arg2);
6422     case PR_SME_GET_VL:
6423         return do_prctl_sme_get_vl(env);
6424     case PR_SME_SET_VL:
6425         return do_prctl_sme_set_vl(env, arg2);
6426     case PR_PAC_RESET_KEYS:
6427         if (arg3 || arg4 || arg5) {
6428             return -TARGET_EINVAL;
6429         }
6430         return do_prctl_reset_keys(env, arg2);
6431     case PR_SET_TAGGED_ADDR_CTRL:
6432         if (arg3 || arg4 || arg5) {
6433             return -TARGET_EINVAL;
6434         }
6435         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6436     case PR_GET_TAGGED_ADDR_CTRL:
6437         if (arg2 || arg3 || arg4 || arg5) {
6438             return -TARGET_EINVAL;
6439         }
6440         return do_prctl_get_tagged_addr_ctrl(env);
6441 
6442     case PR_GET_UNALIGN:
6443         return do_prctl_get_unalign(env, arg2);
6444     case PR_SET_UNALIGN:
6445         return do_prctl_set_unalign(env, arg2);
6446 
6447     case PR_CAP_AMBIENT:
6448     case PR_CAPBSET_READ:
6449     case PR_CAPBSET_DROP:
6450     case PR_GET_DUMPABLE:
6451     case PR_SET_DUMPABLE:
6452     case PR_GET_KEEPCAPS:
6453     case PR_SET_KEEPCAPS:
6454     case PR_GET_SECUREBITS:
6455     case PR_SET_SECUREBITS:
6456     case PR_GET_TIMING:
6457     case PR_SET_TIMING:
6458     case PR_GET_TIMERSLACK:
6459     case PR_SET_TIMERSLACK:
6460     case PR_MCE_KILL:
6461     case PR_MCE_KILL_GET:
6462     case PR_GET_NO_NEW_PRIVS:
6463     case PR_SET_NO_NEW_PRIVS:
6464     case PR_GET_IO_FLUSHER:
6465     case PR_SET_IO_FLUSHER:
6466     case PR_SET_CHILD_SUBREAPER:
6467     case PR_GET_SPECULATION_CTRL:
6468     case PR_SET_SPECULATION_CTRL:
6469         /* These prctl options take no pointer arguments; pass them through. */
6470         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6471 
6472     case PR_GET_CHILD_SUBREAPER:
6473         {
6474             int val;
6475             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6476                                   arg3, arg4, arg5));
6477             if (!is_error(ret) && put_user_s32(val, arg2)) {
6478                 return -TARGET_EFAULT;
6479             }
6480             return ret;
6481         }
6482 
6483     case PR_GET_TID_ADDRESS:
6484         {
6485             TaskState *ts = get_task_state(env_cpu(env));
6486             return put_user_ual(ts->child_tidptr, arg2);
6487         }
6488 
6489     case PR_GET_FPEXC:
6490     case PR_SET_FPEXC:
6491         /* Was used for SPE on PowerPC. */
6492         return -TARGET_EINVAL;
6493 
6494     case PR_GET_ENDIAN:
6495     case PR_SET_ENDIAN:
6496     case PR_GET_FPEMU:
6497     case PR_SET_FPEMU:
6498     case PR_SET_MM:
6499     case PR_GET_SECCOMP:
6500     case PR_SET_SECCOMP:
6501     case PR_SET_SYSCALL_USER_DISPATCH:
6502     case PR_GET_THP_DISABLE:
6503     case PR_SET_THP_DISABLE:
6504     case PR_GET_TSC:
6505     case PR_SET_TSC:
6506         /* Refuse these so the guest cannot disable features QEMU relies on. */
6507         return -TARGET_EINVAL;
6508 
6509     default:
6510         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6511                       option);
6512         return -TARGET_EINVAL;
6513     }
6514 }
6515 
6516 #define NEW_STACK_SIZE 0x40000
6517 
6518 
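/*
 * clone_lock serializes guest thread creation: the parent holds it until
 * the new CPU state is fully set up, and clone_func() takes it briefly
 * before entering cpu_loop() so the child cannot run guest code too early.
 */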
6519 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6520 typedef struct {
6521     CPUArchState *env;
6522     pthread_mutex_t mutex;
6523     pthread_cond_t cond;
6524     pthread_t thread;
6525     uint32_t tid;
6526     abi_ulong child_tidptr;
6527     abi_ulong parent_tidptr;
6528     sigset_t sigmask;
6529 } new_thread_info;
6530 
6531 static void *clone_func(void *arg)
6532 {
6533     new_thread_info *info = arg;
6534     CPUArchState *env;
6535     CPUState *cpu;
6536     TaskState *ts;
6537 
6538     rcu_register_thread();
6539     tcg_register_thread();
6540     env = info->env;
6541     cpu = env_cpu(env);
6542     thread_cpu = cpu;
6543     ts = get_task_state(cpu);
6544     info->tid = sys_gettid();
6545     task_settid(ts);
6546     if (info->child_tidptr)
6547         put_user_u32(info->tid, info->child_tidptr);
6548     if (info->parent_tidptr)
6549         put_user_u32(info->tid, info->parent_tidptr);
6550     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6551     /* Enable signals.  */
6552     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6553     /* Signal to the parent that we're ready.  */
6554     pthread_mutex_lock(&info->mutex);
6555     pthread_cond_broadcast(&info->cond);
6556     pthread_mutex_unlock(&info->mutex);
6557     /* Wait until the parent has finished initializing the tls state.  */
6558     pthread_mutex_lock(&clone_lock);
6559     pthread_mutex_unlock(&clone_lock);
6560     cpu_loop(env);
6561     /* never exits */
6562     return NULL;
6563 }
6564 
6565 /* do_fork() must return host values and target errnos (unlike most
6566    do_*() functions). */
6567 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6568                    abi_ulong parent_tidptr, target_ulong newtls,
6569                    abi_ulong child_tidptr)
6570 {
6571     CPUState *cpu = env_cpu(env);
6572     int ret;
6573     TaskState *ts;
6574     CPUState *new_cpu;
6575     CPUArchState *new_env;
6576     sigset_t sigmask;
6577 
6578     flags &= ~CLONE_IGNORED_FLAGS;
6579 
6580     /* Emulate vfork() with fork() */
6581     if (flags & CLONE_VFORK)
6582         flags &= ~(CLONE_VFORK | CLONE_VM);
6583 
6584     if (flags & CLONE_VM) {
6585         TaskState *parent_ts = get_task_state(cpu);
6586         new_thread_info info;
6587         pthread_attr_t attr;
6588 
6589         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6590             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6591             return -TARGET_EINVAL;
6592         }
6593 
6594         ts = g_new0(TaskState, 1);
6595         init_task_state(ts);
6596 
6597         /* Grab a mutex so that thread setup appears atomic.  */
6598         pthread_mutex_lock(&clone_lock);
6599 
6600         /*
6601          * If this is our first additional thread, we need to ensure we
6602          * generate code for parallel execution and flush old translations.
6603          * Do this now so that the copy gets CF_PARALLEL too.
6604          */
6605         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6606             tcg_cflags_set(cpu, CF_PARALLEL);
6607             tb_flush(cpu);
6608         }
6609 
6610         /* we create a new CPU instance. */
6611         new_env = cpu_copy(env);
6612         /* Init regs that differ from the parent.  */
6613         cpu_clone_regs_child(new_env, newsp, flags);
6614         cpu_clone_regs_parent(env, flags);
6615         new_cpu = env_cpu(new_env);
6616         new_cpu->opaque = ts;
6617         ts->bprm = parent_ts->bprm;
6618         ts->info = parent_ts->info;
6619         ts->signal_mask = parent_ts->signal_mask;
6620 
6621         if (flags & CLONE_CHILD_CLEARTID) {
6622             ts->child_tidptr = child_tidptr;
6623         }
6624 
6625         if (flags & CLONE_SETTLS) {
6626             cpu_set_tls (new_env, newtls);
6627         }
6628 
6629         memset(&info, 0, sizeof(info));
6630         pthread_mutex_init(&info.mutex, NULL);
6631         pthread_mutex_lock(&info.mutex);
6632         pthread_cond_init(&info.cond, NULL);
6633         info.env = new_env;
6634         if (flags & CLONE_CHILD_SETTID) {
6635             info.child_tidptr = child_tidptr;
6636         }
6637         if (flags & CLONE_PARENT_SETTID) {
6638             info.parent_tidptr = parent_tidptr;
6639         }
6640 
6641         ret = pthread_attr_init(&attr);
6642         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6643         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6644         /* It is not safe to deliver signals until the child has finished
6645            initializing, so temporarily block all signals.  */
6646         sigfillset(&sigmask);
6647         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6648         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6649 
6650         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6651         /* TODO: Free new CPU state if thread creation failed.  */
6652 
6653         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6654         pthread_attr_destroy(&attr);
6655         if (ret == 0) {
6656             /* Wait for the child to initialize.  */
6657             pthread_cond_wait(&info.cond, &info.mutex);
6658             ret = info.tid;
6659         } else {
6660             ret = -1;
6661         }
6662         pthread_mutex_unlock(&info.mutex);
6663         pthread_cond_destroy(&info.cond);
6664         pthread_mutex_destroy(&info.mutex);
6665         pthread_mutex_unlock(&clone_lock);
6666     } else {
6667         /* without CLONE_VM, we treat this as a plain fork */
6668         if (flags & CLONE_INVALID_FORK_FLAGS) {
6669             return -TARGET_EINVAL;
6670         }
6671 
6672         /* We can't support custom termination signals */
6673         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6674             return -TARGET_EINVAL;
6675         }
6676 
6677 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6678         if (flags & CLONE_PIDFD) {
6679             return -TARGET_EINVAL;
6680         }
6681 #endif
6682 
6683         /* Cannot allow CLONE_PIDFD with CLONE_PARENT_SETTID: both use parent_tidptr */
6684         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6685             return -TARGET_EINVAL;
6686         }
6687 
6688         if (block_signals()) {
6689             return -QEMU_ERESTARTSYS;
6690         }
6691 
6692         fork_start();
6693         ret = fork();
6694         if (ret == 0) {
6695             /* Child Process.  */
6696             cpu_clone_regs_child(env, newsp, flags);
6697             fork_end(ret);
6698             /* There is a race condition here.  The parent process could
6699                theoretically read the TID in the child process before the child
6700                tid is set.  This would require using either ptrace
6701                (not implemented) or having *_tidptr point at a shared memory
6702                mapping.  We can't repeat the spinlock hack used above because
6703                the child process gets its own copy of the lock.  */
6704             if (flags & CLONE_CHILD_SETTID)
6705                 put_user_u32(sys_gettid(), child_tidptr);
6706             if (flags & CLONE_PARENT_SETTID)
6707                 put_user_u32(sys_gettid(), parent_tidptr);
6708             ts = get_task_state(cpu);
6709             if (flags & CLONE_SETTLS)
6710                 cpu_set_tls (env, newtls);
6711             if (flags & CLONE_CHILD_CLEARTID)
6712                 ts->child_tidptr = child_tidptr;
6713         } else {
6714             cpu_clone_regs_parent(env, flags);
6715             if (flags & CLONE_PIDFD) {
6716                 int pid_fd = 0;
6717 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6718                 int pid_child = ret;
6719                 pid_fd = pidfd_open(pid_child, 0);
6720                 if (pid_fd >= 0) {
6721                     /* Set close-on-exec via the descriptor flags (F_GETFD). */
6722                     fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFD) | FD_CLOEXEC);
6723                 } else {
6724                     pid_fd = 0;
6725                 }
6726 #endif
6727                 put_user_u32(pid_fd, parent_tidptr);
6728             }
6729             fork_end(ret);
6730         }
6731         g_assert(!cpu_in_exclusive_context(cpu));
6732     }
6733     return ret;
6734 }
6735 
6736 /* warning: does not handle Linux-specific flags... */
6737 static int target_to_host_fcntl_cmd(int cmd)
6738 {
6739     int ret;
6740 
6741     switch(cmd) {
6742     case TARGET_F_DUPFD:
6743     case TARGET_F_GETFD:
6744     case TARGET_F_SETFD:
6745     case TARGET_F_GETFL:
6746     case TARGET_F_SETFL:
6747     case TARGET_F_OFD_GETLK:
6748     case TARGET_F_OFD_SETLK:
6749     case TARGET_F_OFD_SETLKW:
6750         ret = cmd;
6751         break;
6752     case TARGET_F_GETLK:
6753         ret = F_GETLK;
6754         break;
6755     case TARGET_F_SETLK:
6756         ret = F_SETLK;
6757         break;
6758     case TARGET_F_SETLKW:
6759         ret = F_SETLKW;
6760         break;
6761     case TARGET_F_GETOWN:
6762         ret = F_GETOWN;
6763         break;
6764     case TARGET_F_SETOWN:
6765         ret = F_SETOWN;
6766         break;
6767     case TARGET_F_GETSIG:
6768         ret = F_GETSIG;
6769         break;
6770     case TARGET_F_SETSIG:
6771         ret = F_SETSIG;
6772         break;
6773 #if TARGET_ABI_BITS == 32
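    /*
     * 32-bit guests use separate *LK64 commands; since QEMU builds with
     * 64-bit file offsets, the host's plain lock commands already handle
     * 64-bit ranges and we map onto them directly.
     */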
6774     case TARGET_F_GETLK64:
6775         ret = F_GETLK;
6776         break;
6777     case TARGET_F_SETLK64:
6778         ret = F_SETLK;
6779         break;
6780     case TARGET_F_SETLKW64:
6781         ret = F_SETLKW;
6782         break;
6783 #endif
6784     case TARGET_F_SETLEASE:
6785         ret = F_SETLEASE;
6786         break;
6787     case TARGET_F_GETLEASE:
6788         ret = F_GETLEASE;
6789         break;
6790 #ifdef F_DUPFD_CLOEXEC
6791     case TARGET_F_DUPFD_CLOEXEC:
6792         ret = F_DUPFD_CLOEXEC;
6793         break;
6794 #endif
6795     case TARGET_F_NOTIFY:
6796         ret = F_NOTIFY;
6797         break;
6798 #ifdef F_GETOWN_EX
6799     case TARGET_F_GETOWN_EX:
6800         ret = F_GETOWN_EX;
6801         break;
6802 #endif
6803 #ifdef F_SETOWN_EX
6804     case TARGET_F_SETOWN_EX:
6805         ret = F_SETOWN_EX;
6806         break;
6807 #endif
6808 #ifdef F_SETPIPE_SZ
6809     case TARGET_F_SETPIPE_SZ:
6810         ret = F_SETPIPE_SZ;
6811         break;
6812     case TARGET_F_GETPIPE_SZ:
6813         ret = F_GETPIPE_SZ;
6814         break;
6815 #endif
6816 #ifdef F_ADD_SEALS
6817     case TARGET_F_ADD_SEALS:
6818         ret = F_ADD_SEALS;
6819         break;
6820     case TARGET_F_GET_SEALS:
6821         ret = F_GET_SEALS;
6822         break;
6823 #endif
6824     default:
6825         ret = -TARGET_EINVAL;
6826         break;
6827     }
6828 
6829 #if defined(__powerpc64__)
6830     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6831      * that the kernel does not support. The glibc fcntl wrapper adjusts
6832      * them to 5, 6 and 7 before making the syscall. Since we make the
6833      * syscall directly, adjust to what the kernel supports.
6834      */
6835     if (ret >= F_GETLK && ret <= F_SETLKW) {
6836         ret -= F_GETLK - 5;
6837     }
6838 #endif
6839 
6840     return ret;
6841 }
6842 
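/*
 * FLOCK_TRANSTBL expands to the body of a switch on "type"; each user
 * defines TRANSTBL_CONVERT so the same table translates lock types in
 * either direction between target and host constants.
 */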
6843 #define FLOCK_TRANSTBL \
6844     switch (type) { \
6845     TRANSTBL_CONVERT(F_RDLCK); \
6846     TRANSTBL_CONVERT(F_WRLCK); \
6847     TRANSTBL_CONVERT(F_UNLCK); \
6848     }
6849 
6850 static int target_to_host_flock(int type)
6851 {
6852 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6853     FLOCK_TRANSTBL
6854 #undef  TRANSTBL_CONVERT
6855     return -TARGET_EINVAL;
6856 }
6857 
6858 static int host_to_target_flock(int type)
6859 {
6860 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6861     FLOCK_TRANSTBL
6862 #undef  TRANSTBL_CONVERT
6863     /* if we don't know how to convert the value coming
6864      * from the host we copy to the target field as-is
6865      */
6866     return type;
6867 }
6868 
6869 static inline abi_long copy_from_user_flock(struct flock *fl,
6870                                             abi_ulong target_flock_addr)
6871 {
6872     struct target_flock *target_fl;
6873     int l_type;
6874 
6875     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6876         return -TARGET_EFAULT;
6877     }
6878 
6879     __get_user(l_type, &target_fl->l_type);
6880     l_type = target_to_host_flock(l_type);
6881     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6882         return l_type;
6883     }
6884     fl->l_type = l_type;
6885     __get_user(fl->l_whence, &target_fl->l_whence);
6886     __get_user(fl->l_start, &target_fl->l_start);
6887     __get_user(fl->l_len, &target_fl->l_len);
6888     __get_user(fl->l_pid, &target_fl->l_pid);
6889     unlock_user_struct(target_fl, target_flock_addr, 0);
6890     return 0;
6891 }
6892 
6893 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6894                                           const struct flock *fl)
6895 {
6896     struct target_flock *target_fl;
6897     short l_type;
6898 
6899     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6900         return -TARGET_EFAULT;
6901     }
6902 
6903     l_type = host_to_target_flock(fl->l_type);
6904     __put_user(l_type, &target_fl->l_type);
6905     __put_user(fl->l_whence, &target_fl->l_whence);
6906     __put_user(fl->l_start, &target_fl->l_start);
6907     __put_user(fl->l_len, &target_fl->l_len);
6908     __put_user(fl->l_pid, &target_fl->l_pid);
6909     unlock_user_struct(target_fl, target_flock_addr, 1);
6910     return 0;
6911 }
6912 
6913 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6914 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6915 
6916 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
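/*
 * The old ARM OABI packs struct flock64 with no padding before l_start
 * (EABI would insert 4 bytes of alignment padding), so it needs its own
 * packed definition and copy helpers.
 */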
6917 struct target_oabi_flock64 {
6918     abi_short l_type;
6919     abi_short l_whence;
6920     abi_llong l_start;
6921     abi_llong l_len;
6922     abi_int   l_pid;
6923 } QEMU_PACKED;
6924 
6925 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6926                                                    abi_ulong target_flock_addr)
6927 {
6928     struct target_oabi_flock64 *target_fl;
6929     int l_type;
6930 
6931     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6932         return -TARGET_EFAULT;
6933     }
6934 
6935     __get_user(l_type, &target_fl->l_type);
6936     l_type = target_to_host_flock(l_type);
6937     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6938         return l_type;
6939     }
6940     fl->l_type = l_type;
6941     __get_user(fl->l_whence, &target_fl->l_whence);
6942     __get_user(fl->l_start, &target_fl->l_start);
6943     __get_user(fl->l_len, &target_fl->l_len);
6944     __get_user(fl->l_pid, &target_fl->l_pid);
6945     unlock_user_struct(target_fl, target_flock_addr, 0);
6946     return 0;
6947 }
6948 
6949 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6950                                                  const struct flock *fl)
6951 {
6952     struct target_oabi_flock64 *target_fl;
6953     short l_type;
6954 
6955     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     l_type = host_to_target_flock(fl->l_type);
6960     __put_user(l_type, &target_fl->l_type);
6961     __put_user(fl->l_whence, &target_fl->l_whence);
6962     __put_user(fl->l_start, &target_fl->l_start);
6963     __put_user(fl->l_len, &target_fl->l_len);
6964     __put_user(fl->l_pid, &target_fl->l_pid);
6965     unlock_user_struct(target_fl, target_flock_addr, 1);
6966     return 0;
6967 }
6968 #endif
6969 
6970 static inline abi_long copy_from_user_flock64(struct flock *fl,
6971                                               abi_ulong target_flock_addr)
6972 {
6973     struct target_flock64 *target_fl;
6974     int l_type;
6975 
6976     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6977         return -TARGET_EFAULT;
6978     }
6979 
6980     __get_user(l_type, &target_fl->l_type);
6981     l_type = target_to_host_flock(l_type);
6982     if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
6983         return l_type;
6984     }
6985     fl->l_type = l_type;
6986     __get_user(fl->l_whence, &target_fl->l_whence);
6987     __get_user(fl->l_start, &target_fl->l_start);
6988     __get_user(fl->l_len, &target_fl->l_len);
6989     __get_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 0);
6991     return 0;
6992 }
6993 
6994 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6995                                             const struct flock *fl)
6996 {
6997     struct target_flock64 *target_fl;
6998     short l_type;
6999 
7000     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     l_type = host_to_target_flock(fl->l_type);
7005     __put_user(l_type, &target_fl->l_type);
7006     __put_user(fl->l_whence, &target_fl->l_whence);
7007     __put_user(fl->l_start, &target_fl->l_start);
7008     __put_user(fl->l_len, &target_fl->l_len);
7009     __put_user(fl->l_pid, &target_fl->l_pid);
7010     unlock_user_struct(target_fl, target_flock_addr, 1);
7011     return 0;
7012 }
7013 
7014 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7015 {
7016     struct flock fl;
7017 #ifdef F_GETOWN_EX
7018     struct f_owner_ex fox;
7019     struct target_f_owner_ex *target_fox;
7020 #endif
7021     abi_long ret;
7022     int host_cmd = target_to_host_fcntl_cmd(cmd);
7023 
7024     if (host_cmd == -TARGET_EINVAL)
7025         return host_cmd;
7026 
7027     switch(cmd) {
7028     case TARGET_F_GETLK:
7029         ret = copy_from_user_flock(&fl, arg);
7030         if (ret) {
7031             return ret;
7032         }
7033         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7034         if (ret == 0) {
7035             ret = copy_to_user_flock(arg, &fl);
7036         }
7037         break;
7038 
7039     case TARGET_F_SETLK:
7040     case TARGET_F_SETLKW:
7041         ret = copy_from_user_flock(&fl, arg);
7042         if (ret) {
7043             return ret;
7044         }
7045         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7046         break;
7047 
7048     case TARGET_F_GETLK64:
7049     case TARGET_F_OFD_GETLK:
7050         ret = copy_from_user_flock64(&fl, arg);
7051         if (ret) {
7052             return ret;
7053         }
7054         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7055         if (ret == 0) {
7056             ret = copy_to_user_flock64(arg, &fl);
7057         }
7058         break;
7059     case TARGET_F_SETLK64:
7060     case TARGET_F_SETLKW64:
7061     case TARGET_F_OFD_SETLK:
7062     case TARGET_F_OFD_SETLKW:
7063         ret = copy_from_user_flock64(&fl, arg);
7064         if (ret) {
7065             return ret;
7066         }
7067         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7068         break;
7069 
7070     case TARGET_F_GETFL:
7071         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7072         if (ret >= 0) {
7073             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7074             /* tell 32-bit guests that the fd is large-file capable on 64-bit hosts: */
7075             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7076                 ret |= TARGET_O_LARGEFILE;
7077             }
7078         }
7079         break;
7080 
7081     case TARGET_F_SETFL:
7082         ret = get_errno(safe_fcntl(fd, host_cmd,
7083                                    target_to_host_bitmask(arg,
7084                                                           fcntl_flags_tbl)));
7085         break;
7086 
7087 #ifdef F_GETOWN_EX
7088     case TARGET_F_GETOWN_EX:
7089         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7090         if (ret >= 0) {
7091             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7092                 return -TARGET_EFAULT;
7093             target_fox->type = tswap32(fox.type);
7094             target_fox->pid = tswap32(fox.pid);
7095             unlock_user_struct(target_fox, arg, 1);
7096         }
7097         break;
7098 #endif
7099 
7100 #ifdef F_SETOWN_EX
7101     case TARGET_F_SETOWN_EX:
7102         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7103             return -TARGET_EFAULT;
7104         fox.type = tswap32(target_fox->type);
7105         fox.pid = tswap32(target_fox->pid);
7106         unlock_user_struct(target_fox, arg, 0);
7107         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7108         break;
7109 #endif
7110 
7111     case TARGET_F_SETSIG:
7112         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7113         break;
7114 
7115     case TARGET_F_GETSIG:
7116         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7117         break;
7118 
7119     case TARGET_F_SETOWN:
7120     case TARGET_F_GETOWN:
7121     case TARGET_F_SETLEASE:
7122     case TARGET_F_GETLEASE:
7123     case TARGET_F_SETPIPE_SZ:
7124     case TARGET_F_GETPIPE_SZ:
7125     case TARGET_F_ADD_SEALS:
7126     case TARGET_F_GET_SEALS:
7127         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7128         break;
7129 
7130     default:
7131         ret = get_errno(safe_fcntl(fd, cmd, arg));
7132         break;
7133     }
7134     return ret;
7135 }
7136 
7137 #ifdef USE_UID16
7138 
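/*
 * Helpers for targets using the legacy 16-bit UID/GID ABI: IDs that do
 * not fit in 16 bits are reported as 65534 (the kernel's default
 * overflow ID), while -1 is preserved so "unchanged" arguments keep
 * their meaning when widened.
 */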
7139 static inline int high2lowuid(int uid)
7140 {
7141     if (uid > 65535)
7142         return 65534;
7143     else
7144         return uid;
7145 }
7146 
7147 static inline int high2lowgid(int gid)
7148 {
7149     if (gid > 65535)
7150         return 65534;
7151     else
7152         return gid;
7153 }
7154 
7155 static inline int low2highuid(int uid)
7156 {
7157     if ((int16_t)uid == -1)
7158         return -1;
7159     else
7160         return uid;
7161 }
7162 
7163 static inline int low2highgid(int gid)
7164 {
7165     if ((int16_t)gid == -1)
7166         return -1;
7167     else
7168         return gid;
7169 }
7170 static inline int tswapid(int id)
7171 {
7172     return tswap16(id);
7173 }
7174 
7175 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7176 
7177 #else /* !USE_UID16 */
7178 static inline int high2lowuid(int uid)
7179 {
7180     return uid;
7181 }
7182 static inline int high2lowgid(int gid)
7183 {
7184     return gid;
7185 }
7186 static inline int low2highuid(int uid)
7187 {
7188     return uid;
7189 }
7190 static inline int low2highgid(int gid)
7191 {
7192     return gid;
7193 }
7194 static inline int tswapid(int id)
7195 {
7196     return tswap32(id);
7197 }
7198 
7199 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7200 
7201 #endif /* USE_UID16 */
7202 
7203 /* We must do direct syscalls for setting UID/GID, because we want to
7204  * implement the Linux system call semantics of "change only for this thread",
7205  * not the libc/POSIX semantics of "change for all threads in process".
7206  * (See http://ewontfix.com/17/ for more details.)
7207  * We use the 32-bit version of the syscalls if present; if it is not
7208  * then either the host architecture supports 32-bit UIDs natively with
7209  * the standard syscall, or the 16-bit UID is the best we can do.
7210  */
7211 #ifdef __NR_setuid32
7212 #define __NR_sys_setuid __NR_setuid32
7213 #else
7214 #define __NR_sys_setuid __NR_setuid
7215 #endif
7216 #ifdef __NR_setgid32
7217 #define __NR_sys_setgid __NR_setgid32
7218 #else
7219 #define __NR_sys_setgid __NR_setgid
7220 #endif
7221 #ifdef __NR_setresuid32
7222 #define __NR_sys_setresuid __NR_setresuid32
7223 #else
7224 #define __NR_sys_setresuid __NR_setresuid
7225 #endif
7226 #ifdef __NR_setresgid32
7227 #define __NR_sys_setresgid __NR_setresgid32
7228 #else
7229 #define __NR_sys_setresgid __NR_setresgid
7230 #endif
7231 #ifdef __NR_setgroups32
7232 #define __NR_sys_setgroups __NR_setgroups32
7233 #else
7234 #define __NR_sys_setgroups __NR_setgroups
7235 #endif
7236 
7237 _syscall1(int, sys_setuid, uid_t, uid)
7238 _syscall1(int, sys_setgid, gid_t, gid)
7239 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7240 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7241 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7242 
7243 void syscall_init(void)
7244 {
7245     IOCTLEntry *ie;
7246     const argtype *arg_type;
7247     int size;
7248 
7249     thunk_init(STRUCT_MAX);
7250 
7251 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7252 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7253 #include "syscall_types.h"
7254 #undef STRUCT
7255 #undef STRUCT_SPECIAL
7256 
7257     /* we patch the ioctl size if necessary. We rely on the fact that
7258        no ioctl has all the bits at '1' in the size field */
7259     ie = ioctl_entries;
7260     while (ie->target_cmd != 0) {
7261         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7262             TARGET_IOC_SIZEMASK) {
7263             arg_type = ie->arg_type;
7264             if (arg_type[0] != TYPE_PTR) {
7265                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7266                         ie->target_cmd);
7267                 exit(1);
7268             }
7269             arg_type++;
7270             size = thunk_type_size(arg_type, 0);
7271             ie->target_cmd = (ie->target_cmd &
7272                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7273                 (size << TARGET_IOC_SIZESHIFT);
7274         }
7275 
7276         /* automatic consistency check if same arch */
7277 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7278     (defined(__x86_64__) && defined(TARGET_X86_64))
7279         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7280             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7281                     ie->name, ie->target_cmd, ie->host_cmd);
7282         }
7283 #endif
7284         ie++;
7285     }
7286 }
7287 
7288 #ifdef TARGET_NR_truncate64
7289 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7290                                          abi_long arg2,
7291                                          abi_long arg3,
7292                                          abi_long arg4)
7293 {
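    /*
     * Some 32-bit ABIs pass 64-bit syscall arguments in aligned register
     * pairs, inserting a pad slot first; when that applies, the offset
     * halves arrive one argument later.
     */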
7294     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7295         arg2 = arg3;
7296         arg3 = arg4;
7297     }
7298     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7299 }
7300 #endif
7301 
7302 #ifdef TARGET_NR_ftruncate64
7303 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7304                                           abi_long arg2,
7305                                           abi_long arg3,
7306                                           abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #if defined(TARGET_NR_timer_settime) || \
7317     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7318 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7319                                                  abi_ulong target_addr)
7320 {
7321     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7322                                 offsetof(struct target_itimerspec,
7323                                          it_interval)) ||
7324         target_to_host_timespec(&host_its->it_value, target_addr +
7325                                 offsetof(struct target_itimerspec,
7326                                          it_value))) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if defined(TARGET_NR_timer_settime64) || \
7335     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7336 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7337                                                    abi_ulong target_addr)
7338 {
7339     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7340                                   offsetof(struct target__kernel_itimerspec,
7341                                            it_interval)) ||
7342         target_to_host_timespec64(&host_its->it_value, target_addr +
7343                                   offsetof(struct target__kernel_itimerspec,
7344                                            it_value))) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     return 0;
7349 }
7350 #endif
7351 
7352 #if ((defined(TARGET_NR_timerfd_gettime) || \
7353       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7354       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7355 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7356                                                  struct itimerspec *host_its)
7357 {
7358     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7359                                                        it_interval),
7360                                 &host_its->it_interval) ||
7361         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7362                                                        it_value),
7363                                 &host_its->it_value)) {
7364         return -TARGET_EFAULT;
7365     }
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7371       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7372       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7373 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7374                                                    struct itimerspec *host_its)
7375 {
7376     if (host_to_target_timespec64(target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval),
7379                                   &host_its->it_interval) ||
7380         host_to_target_timespec64(target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_value),
7383                                   &host_its->it_value)) {
7384         return -TARGET_EFAULT;
7385     }
7386     return 0;
7387 }
7388 #endif
7389 
7390 #if defined(TARGET_NR_adjtimex) || \
7391     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7392 static inline abi_long target_to_host_timex(struct timex *host_tx,
7393                                             abi_long target_addr)
7394 {
7395     struct target_timex *target_tx;
7396 
7397     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7398         return -TARGET_EFAULT;
7399     }
7400 
7401     __get_user(host_tx->modes, &target_tx->modes);
7402     __get_user(host_tx->offset, &target_tx->offset);
7403     __get_user(host_tx->freq, &target_tx->freq);
7404     __get_user(host_tx->maxerror, &target_tx->maxerror);
7405     __get_user(host_tx->esterror, &target_tx->esterror);
7406     __get_user(host_tx->status, &target_tx->status);
7407     __get_user(host_tx->constant, &target_tx->constant);
7408     __get_user(host_tx->precision, &target_tx->precision);
7409     __get_user(host_tx->tolerance, &target_tx->tolerance);
7410     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7411     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7412     __get_user(host_tx->tick, &target_tx->tick);
7413     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7414     __get_user(host_tx->jitter, &target_tx->jitter);
7415     __get_user(host_tx->shift, &target_tx->shift);
7416     __get_user(host_tx->stabil, &target_tx->stabil);
7417     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7418     __get_user(host_tx->calcnt, &target_tx->calcnt);
7419     __get_user(host_tx->errcnt, &target_tx->errcnt);
7420     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7421     __get_user(host_tx->tai, &target_tx->tai);
7422 
7423     unlock_user_struct(target_tx, target_addr, 0);
7424     return 0;
7425 }
7426 
7427 static inline abi_long host_to_target_timex(abi_long target_addr,
7428                                             struct timex *host_tx)
7429 {
7430     struct target_timex *target_tx;
7431 
7432     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7433         return -TARGET_EFAULT;
7434     }
7435 
7436     __put_user(host_tx->modes, &target_tx->modes);
7437     __put_user(host_tx->offset, &target_tx->offset);
7438     __put_user(host_tx->freq, &target_tx->freq);
7439     __put_user(host_tx->maxerror, &target_tx->maxerror);
7440     __put_user(host_tx->esterror, &target_tx->esterror);
7441     __put_user(host_tx->status, &target_tx->status);
7442     __put_user(host_tx->constant, &target_tx->constant);
7443     __put_user(host_tx->precision, &target_tx->precision);
7444     __put_user(host_tx->tolerance, &target_tx->tolerance);
7445     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7446     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7447     __put_user(host_tx->tick, &target_tx->tick);
7448     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7449     __put_user(host_tx->jitter, &target_tx->jitter);
7450     __put_user(host_tx->shift, &target_tx->shift);
7451     __put_user(host_tx->stabil, &target_tx->stabil);
7452     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7453     __put_user(host_tx->calcnt, &target_tx->calcnt);
7454     __put_user(host_tx->errcnt, &target_tx->errcnt);
7455     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7456     __put_user(host_tx->tai, &target_tx->tai);
7457 
7458     unlock_user_struct(target_tx, target_addr, 1);
7459     return 0;
7460 }
7461 #endif
7462 
7463 
7464 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7465 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7466                                               abi_long target_addr)
7467 {
7468     struct target__kernel_timex *target_tx;
7469 
7470     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7471                                  offsetof(struct target__kernel_timex,
7472                                           time))) {
7473         return -TARGET_EFAULT;
7474     }
7475 
7476     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7477         return -TARGET_EFAULT;
7478     }
7479 
7480     __get_user(host_tx->modes, &target_tx->modes);
7481     __get_user(host_tx->offset, &target_tx->offset);
7482     __get_user(host_tx->freq, &target_tx->freq);
7483     __get_user(host_tx->maxerror, &target_tx->maxerror);
7484     __get_user(host_tx->esterror, &target_tx->esterror);
7485     __get_user(host_tx->status, &target_tx->status);
7486     __get_user(host_tx->constant, &target_tx->constant);
7487     __get_user(host_tx->precision, &target_tx->precision);
7488     __get_user(host_tx->tolerance, &target_tx->tolerance);
7489     __get_user(host_tx->tick, &target_tx->tick);
7490     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7491     __get_user(host_tx->jitter, &target_tx->jitter);
7492     __get_user(host_tx->shift, &target_tx->shift);
7493     __get_user(host_tx->stabil, &target_tx->stabil);
7494     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7495     __get_user(host_tx->calcnt, &target_tx->calcnt);
7496     __get_user(host_tx->errcnt, &target_tx->errcnt);
7497     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7498     __get_user(host_tx->tai, &target_tx->tai);
7499 
7500     unlock_user_struct(target_tx, target_addr, 0);
7501     return 0;
7502 }
7503 
7504 static inline abi_long host_to_target_timex64(abi_long target_addr,
7505                                               struct timex *host_tx)
7506 {
7507     struct target__kernel_timex *target_tx;
7508 
7509    if (copy_to_user_timeval64(target_addr +
7510                               offsetof(struct target__kernel_timex, time),
7511                               &host_tx->time)) {
7512         return -TARGET_EFAULT;
7513     }
7514 
7515     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7516         return -TARGET_EFAULT;
7517     }
7518 
7519     __put_user(host_tx->modes, &target_tx->modes);
7520     __put_user(host_tx->offset, &target_tx->offset);
7521     __put_user(host_tx->freq, &target_tx->freq);
7522     __put_user(host_tx->maxerror, &target_tx->maxerror);
7523     __put_user(host_tx->esterror, &target_tx->esterror);
7524     __put_user(host_tx->status, &target_tx->status);
7525     __put_user(host_tx->constant, &target_tx->constant);
7526     __put_user(host_tx->precision, &target_tx->precision);
7527     __put_user(host_tx->tolerance, &target_tx->tolerance);
7528     __put_user(host_tx->tick, &target_tx->tick);
7529     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7530     __put_user(host_tx->jitter, &target_tx->jitter);
7531     __put_user(host_tx->shift, &target_tx->shift);
7532     __put_user(host_tx->stabil, &target_tx->stabil);
7533     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7534     __put_user(host_tx->calcnt, &target_tx->calcnt);
7535     __put_user(host_tx->errcnt, &target_tx->errcnt);
7536     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7537     __put_user(host_tx->tai, &target_tx->tai);
7538 
7539     unlock_user_struct(target_tx, target_addr, 1);
7540     return 0;
7541 }
7542 #endif
7543 
7544 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7545 #define sigev_notify_thread_id _sigev_un._tid
7546 #endif
7547 
7548 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7549                                                abi_ulong target_addr)
7550 {
7551     struct target_sigevent *target_sevp;
7552 
7553     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7554         return -TARGET_EFAULT;
7555     }
7556 
7557     /* This union is awkward on 64 bit systems because it has a 32 bit
7558      * integer and a pointer in it; we follow the conversion approach
7559      * used for handling sigval types in signal.c so the guest should get
7560      * the correct value back even if we did a 64 bit byteswap and it's
7561      * using the 32 bit integer.
7562      */
7563     host_sevp->sigev_value.sival_ptr =
7564         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7565     host_sevp->sigev_signo =
7566         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7567     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7568     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7569 
7570     unlock_user_struct(target_sevp, target_addr, 1);
7571     return 0;
7572 }
7573 
7574 #if defined(TARGET_NR_mlockall)
7575 static inline int target_to_host_mlockall_arg(int arg)
7576 {
7577     int result = 0;
7578 
7579     if (arg & TARGET_MCL_CURRENT) {
7580         result |= MCL_CURRENT;
7581     }
7582     if (arg & TARGET_MCL_FUTURE) {
7583         result |= MCL_FUTURE;
7584     }
7585 #ifdef MCL_ONFAULT
7586     if (arg & TARGET_MCL_ONFAULT) {
7587         result |= MCL_ONFAULT;
7588     }
7589 #endif
7590 
7591     return result;
7592 }
7593 #endif
7594 
7595 static inline int target_to_host_msync_arg(abi_long arg)
7596 {
7597     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7598            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7599            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7600            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7601 }
7602 
7603 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7604      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7605      defined(TARGET_NR_newfstatat))
7606 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7607                                              abi_ulong target_addr,
7608                                              struct stat *host_st)
7609 {
7610 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
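    /* ARM EABI has its own stat64 layout, so convert via target_eabi_stat64. */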
7611     if (cpu_env->eabi) {
7612         struct target_eabi_stat64 *target_st;
7613 
7614         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7615             return -TARGET_EFAULT;
7616         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7617         __put_user(host_st->st_dev, &target_st->st_dev);
7618         __put_user(host_st->st_ino, &target_st->st_ino);
7619 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7620         __put_user(host_st->st_ino, &target_st->__st_ino);
7621 #endif
7622         __put_user(host_st->st_mode, &target_st->st_mode);
7623         __put_user(host_st->st_nlink, &target_st->st_nlink);
7624         __put_user(host_st->st_uid, &target_st->st_uid);
7625         __put_user(host_st->st_gid, &target_st->st_gid);
7626         __put_user(host_st->st_rdev, &target_st->st_rdev);
7627         __put_user(host_st->st_size, &target_st->st_size);
7628         __put_user(host_st->st_blksize, &target_st->st_blksize);
7629         __put_user(host_st->st_blocks, &target_st->st_blocks);
7630         __put_user(host_st->st_atime, &target_st->target_st_atime);
7631         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7632         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7633 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7634         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7635         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7636         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7637 #endif
7638         unlock_user_struct(target_st, target_addr, 1);
7639     } else
7640 #endif
7641     {
7642 #if defined(TARGET_HAS_STRUCT_STAT64)
7643         struct target_stat64 *target_st;
7644 #else
7645         struct target_stat *target_st;
7646 #endif
7647 
7648         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7649             return -TARGET_EFAULT;
7650         memset(target_st, 0, sizeof(*target_st));
7651         __put_user(host_st->st_dev, &target_st->st_dev);
7652         __put_user(host_st->st_ino, &target_st->st_ino);
7653 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7654         __put_user(host_st->st_ino, &target_st->__st_ino);
7655 #endif
7656         __put_user(host_st->st_mode, &target_st->st_mode);
7657         __put_user(host_st->st_nlink, &target_st->st_nlink);
7658         __put_user(host_st->st_uid, &target_st->st_uid);
7659         __put_user(host_st->st_gid, &target_st->st_gid);
7660         __put_user(host_st->st_rdev, &target_st->st_rdev);
7661         /* XXX: better use of kernel struct */
7662         __put_user(host_st->st_size, &target_st->st_size);
7663         __put_user(host_st->st_blksize, &target_st->st_blksize);
7664         __put_user(host_st->st_blocks, &target_st->st_blocks);
7665         __put_user(host_st->st_atime, &target_st->target_st_atime);
7666         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7667         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7668 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7669         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7670         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7671         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7672 #endif
7673         unlock_user_struct(target_st, target_addr, 1);
7674     }
7675 
7676     return 0;
7677 }
7678 #endif
7679 
7680 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7681 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7682                                             abi_ulong target_addr)
7683 {
7684     struct target_statx *target_stx;
7685 
7686     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7687         return -TARGET_EFAULT;
7688     }
7689     memset(target_stx, 0, sizeof(*target_stx));
7690 
7691     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7692     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7693     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7694     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7695     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7696     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7697     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7698     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7699     __put_user(host_stx->stx_size, &target_stx->stx_size);
7700     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7701     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7702     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7703     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7704     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7705     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7706     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7707     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7708     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7709     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7710     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7711     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7712     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7713     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7714 
7715     unlock_user_struct(target_stx, target_addr, 1);
7716 
7717     return 0;
7718 }
7719 #endif
7720 
7721 static int do_sys_futex(int *uaddr, int op, int val,
7722                          const struct timespec *timeout, int *uaddr2,
7723                          int val3)
7724 {
7725 #if HOST_LONG_BITS == 64
7726 #if defined(__NR_futex)
7727     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7728     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7729 
7730 #endif
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733     if (sizeof(timeout->tv_sec) == 8) {
7734         /* _time64 function on 32bit arch */
7735         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7736     }
7737 #endif
7738 #if defined(__NR_futex)
7739     /* old function on 32bit arch */
7740     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7741 #endif
7742 #endif /* HOST_LONG_BITS == 64 */
7743     g_assert_not_reached();
7744 }
7745 
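/*
 * do_safe_futex mirrors do_sys_futex, but routes the call through the
 * safe_syscall wrappers so that a guest signal arriving while the thread
 * is blocked (e.g. in FUTEX_WAIT) is handled correctly, and converts the
 * raw result to a negative errno via get_errno().
 */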
7746 static int do_safe_futex(int *uaddr, int op, int val,
7747                          const struct timespec *timeout, int *uaddr2,
7748                          int val3)
7749 {
7750 #if HOST_LONG_BITS == 64
7751 #if defined(__NR_futex)
7752     /* The host time_t is always 64-bit, so no _time64 variant is defined. */
7753     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7754 #endif
7755 #else /* HOST_LONG_BITS == 64 */
7756 #if defined(__NR_futex_time64)
7757     if (sizeof(timeout->tv_sec) == 8) {
7758         /* _time64 function on 32bit arch */
7759         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7760                                            val3));
7761     }
7762 #endif
7763 #if defined(__NR_futex)
7764     /* old function on 32bit arch */
7765     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7766 #endif
7767 #endif /* HOST_LONG_BITS == 64 */
7768     return -TARGET_ENOSYS;
7769 }
7770 
7771 /* ??? Using host futex calls even when target atomic operations
7772    are not really atomic probably breaks things.  However, implementing
7773    futexes locally would make futexes shared between multiple processes
7774    tricky, and they would probably be useless anyway because guest atomic
7775    operations won't work either.  */
7776 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7777 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7778                     int op, int val, target_ulong timeout,
7779                     target_ulong uaddr2, int val3)
7780 {
7781     struct timespec ts, *pts = NULL;
7782     void *haddr2 = NULL;
7783     int base_op;
7784 
7785     /* We assume FUTEX_* constants are the same on both host and target. */
7786 #ifdef FUTEX_CMD_MASK
7787     base_op = op & FUTEX_CMD_MASK;
7788 #else
7789     base_op = op;
7790 #endif
7791     switch (base_op) {
7792     case FUTEX_WAIT:
7793     case FUTEX_WAIT_BITSET:
7794         val = tswap32(val);
7795         break;
7796     case FUTEX_WAIT_REQUEUE_PI:
7797         val = tswap32(val);
7798         haddr2 = g2h(cpu, uaddr2);
7799         break;
7800     case FUTEX_LOCK_PI:
7801     case FUTEX_LOCK_PI2:
7802         break;
7803     case FUTEX_WAKE:
7804     case FUTEX_WAKE_BITSET:
7805     case FUTEX_TRYLOCK_PI:
7806     case FUTEX_UNLOCK_PI:
7807         timeout = 0;
7808         break;
7809     case FUTEX_FD:
7810         val = target_to_host_signal(val);
7811         timeout = 0;
7812         break;
7813     case FUTEX_CMP_REQUEUE:
7814     case FUTEX_CMP_REQUEUE_PI:
7815         val3 = tswap32(val3);
7816         /* fall through */
7817     case FUTEX_REQUEUE:
7818     case FUTEX_WAKE_OP:
7819         /*
7820          * For these, the 4th argument is not TIMEOUT, but VAL2.
7821          * But the prototype of do_safe_futex takes a pointer, so
7822          * insert casts to satisfy the compiler.  We do not need
7823          * to tswap VAL2 since it's not compared to guest memory.
7824          */
7825         pts = (struct timespec *)(uintptr_t)timeout;
7826         timeout = 0;
7827         haddr2 = g2h(cpu, uaddr2);
7828         break;
7829     default:
7830         return -TARGET_ENOSYS;
7831     }
7832     if (timeout) {
7833         pts = &ts;
7834         if (time64
7835             ? target_to_host_timespec64(pts, timeout)
7836             : target_to_host_timespec(pts, timeout)) {
7837             return -TARGET_EFAULT;
7838         }
7839     }
7840     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7841 }
7842 #endif
7843 
7844 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7845 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7846                                      abi_long handle, abi_long mount_id,
7847                                      abi_long flags)
7848 {
7849     struct file_handle *target_fh;
7850     struct file_handle *fh;
7851     int mid = 0;
7852     abi_long ret;
7853     char *name;
7854     unsigned int size, total_size;
7855 
7856     if (get_user_s32(size, handle)) {
7857         return -TARGET_EFAULT;
7858     }
7859 
7860     name = lock_user_string(pathname);
7861     if (!name) {
7862         return -TARGET_EFAULT;
7863     }
7864 
7865     total_size = sizeof(struct file_handle) + size;
7866     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7867     if (!target_fh) {
7868         unlock_user(name, pathname, 0);
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     fh = g_malloc0(total_size);
7873     fh->handle_bytes = size;
7874 
7875     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7876     unlock_user(name, pathname, 0);
7877 
7878     /* man name_to_handle_at(2):
7879      * Other than the use of the handle_bytes field, the caller should treat
7880      * the file_handle structure as an opaque data type
7881      */
7882 
7883     memcpy(target_fh, fh, total_size);
7884     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7885     target_fh->handle_type = tswap32(fh->handle_type);
7886     g_free(fh);
7887     unlock_user(target_fh, handle, total_size);
7888 
7889     if (put_user_s32(mid, mount_id)) {
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     return ret;
7894 
7895 }
7896 #endif
7897 
7898 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7899 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7900                                      abi_long flags)
7901 {
7902     struct file_handle *target_fh;
7903     struct file_handle *fh;
7904     unsigned int size, total_size;
7905     abi_long ret;
7906 
7907     if (get_user_s32(size, handle)) {
7908         return -TARGET_EFAULT;
7909     }
7910 
7911     total_size = sizeof(struct file_handle) + size;
7912     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7913     if (!target_fh) {
7914         return -TARGET_EFAULT;
7915     }
7916 
7917     fh = g_memdup(target_fh, total_size);
7918     fh->handle_bytes = size;
7919     fh->handle_type = tswap32(target_fh->handle_type);
7920 
7921     ret = get_errno(open_by_handle_at(mount_fd, fh,
7922                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7923 
7924     g_free(fh);
7925 
7926     unlock_user(target_fh, handle, total_size);
7927 
7928     return ret;
7929 }
7930 #endif
7931 
7932 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7933 
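/*
 * Implements signalfd()/signalfd4() for the guest: convert the target
 * sigset and flags to host format, create the host signalfd, and register
 * an fd translator so that signalfd_siginfo records read back from the
 * descriptor are converted to the target layout.
 */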
7934 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7935 {
7936     int host_flags;
7937     target_sigset_t *target_mask;
7938     sigset_t host_mask;
7939     abi_long ret;
7940 
7941     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7942         return -TARGET_EINVAL;
7943     }
7944     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7945         return -TARGET_EFAULT;
7946     }
7947 
7948     target_to_host_sigset(&host_mask, target_mask);
7949 
7950     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7951 
7952     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7953     if (ret >= 0) {
7954         fd_trans_register(ret, &target_signalfd_trans);
7955     }
7956 
7957     unlock_user_struct(target_mask, mask, 0);
7958 
7959     return ret;
7960 }
7961 #endif
7962 
7963 /* Map host to target signal numbers for the wait family of syscalls.
7964    Assume all other status bits are the same.  */
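/* For example, a child killed by the host's SIGUSR1 is reported to the
   guest with the target's SIGUSR1 number in the low 7 bits of the status. */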
7965 int host_to_target_waitstatus(int status)
7966 {
7967     if (WIFSIGNALED(status)) {
7968         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7969     }
7970     if (WIFSTOPPED(status)) {
7971         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7972                | (status & 0xff);
7973     }
7974     return status;
7975 }
7976 
7977 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7978 {
7979     CPUState *cpu = env_cpu(cpu_env);
7980     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7981     int i;
7982 
7983     for (i = 0; i < bprm->argc; i++) {
7984         size_t len = strlen(bprm->argv[i]) + 1;
7985 
7986         if (write(fd, bprm->argv[i], len) != len) {
7987             return -1;
7988         }
7989     }
7990 
7991     return 0;
7992 }
7993 
7994 struct open_self_maps_data {
7995     TaskState *ts;
7996     IntervalTreeRoot *host_maps;
7997     int fd;
7998     bool smaps;
7999 };
8000 
8001 /*
8002  * Subroutine to output one line of /proc/self/maps,
8003  * or one region of /proc/self/smaps.
8004  */
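/*
 * The output follows the usual /proc/self/maps layout, e.g. (illustrative
 * values only):
 *   00400000-0040b000 r-xp 00000000 08:01 123456    /usr/bin/cat
 */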
8005 
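/*
 * The stack grows upwards on HPPA, so the stack mapping is recognised by
 * its end address matching the stack limit; on all other targets the
 * start address is compared instead.
 */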
8006 #ifdef TARGET_HPPA
8007 # define test_stack(S, E, L)  (E == L)
8008 #else
8009 # define test_stack(S, E, L)  (S == L)
8010 #endif
8011 
8012 static void open_self_maps_4(const struct open_self_maps_data *d,
8013                              const MapInfo *mi, abi_ptr start,
8014                              abi_ptr end, unsigned flags)
8015 {
8016     const struct image_info *info = d->ts->info;
8017     const char *path = mi->path;
8018     uint64_t offset;
8019     int fd = d->fd;
8020     int count;
8021 
8022     if (test_stack(start, end, info->stack_limit)) {
8023         path = "[stack]";
8024     } else if (start == info->brk) {
8025         path = "[heap]";
8026     } else if (start == info->vdso) {
8027         path = "[vdso]";
8028 #ifdef TARGET_X86_64
8029     } else if (start == TARGET_VSYSCALL_PAGE) {
8030         path = "[vsyscall]";
8031 #endif
8032     }
8033 
8034     /* For non-anonymous mappings, adjust the offset for this fragment. */
8035     offset = mi->offset;
8036     if (mi->dev) {
8037         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8038         offset += hstart - mi->itree.start;
8039     }
8040 
8041     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8042                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8043                     start, end,
8044                     (flags & PAGE_READ) ? 'r' : '-',
8045                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8046                     (flags & PAGE_EXEC) ? 'x' : '-',
8047                     mi->is_priv ? 'p' : 's',
8048                     offset, major(mi->dev), minor(mi->dev),
8049                     (uint64_t)mi->inode);
8050     if (path) {
8051         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8052     } else {
8053         dprintf(fd, "\n");
8054     }
8055 
8056     if (d->smaps) {
8057         unsigned long size = end - start;
8058         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8059         unsigned long size_kb = size >> 10;
8060 
8061         dprintf(fd, "Size:                  %lu kB\n"
8062                 "KernelPageSize:        %lu kB\n"
8063                 "MMUPageSize:           %lu kB\n"
8064                 "Rss:                   0 kB\n"
8065                 "Pss:                   0 kB\n"
8066                 "Pss_Dirty:             0 kB\n"
8067                 "Shared_Clean:          0 kB\n"
8068                 "Shared_Dirty:          0 kB\n"
8069                 "Private_Clean:         0 kB\n"
8070                 "Private_Dirty:         0 kB\n"
8071                 "Referenced:            0 kB\n"
8072                 "Anonymous:             %lu kB\n"
8073                 "LazyFree:              0 kB\n"
8074                 "AnonHugePages:         0 kB\n"
8075                 "ShmemPmdMapped:        0 kB\n"
8076                 "FilePmdMapped:         0 kB\n"
8077                 "Shared_Hugetlb:        0 kB\n"
8078                 "Private_Hugetlb:       0 kB\n"
8079                 "Swap:                  0 kB\n"
8080                 "SwapPss:               0 kB\n"
8081                 "Locked:                0 kB\n"
8082                 "THPeligible:    0\n"
8083                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8084                 size_kb, page_size_kb, page_size_kb,
8085                 (flags & PAGE_ANON ? size_kb : 0),
8086                 (flags & PAGE_READ) ? " rd" : "",
8087                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8088                 (flags & PAGE_EXEC) ? " ex" : "",
8089                 mi->is_priv ? "" : " sh",
8090                 (flags & PAGE_READ) ? " mr" : "",
8091                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8092                 (flags & PAGE_EXEC) ? " me" : "",
8093                 mi->is_priv ? "" : " ms");
8094     }
8095 }
8096 
8097 /*
8098  * Callback for walk_memory_regions, when read_self_maps() fails.
8099  * Proceed without the benefit of host /proc/self/maps cross-check.
8100  */
8101 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8102                             target_ulong guest_end, unsigned long flags)
8103 {
8104     static const MapInfo mi = { .is_priv = true };
8105 
8106     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8107     return 0;
8108 }
8109 
8110 /*
8111  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8112  */
8113 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8114                             target_ulong guest_end, unsigned long flags)
8115 {
8116     const struct open_self_maps_data *d = opaque;
8117     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8118     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8119 
8120 #ifdef TARGET_X86_64
8121     /*
8122      * Because of the extremely high position of the page within the guest
8123      * virtual address space, this is not backed by host memory at all.
8124      * Therefore the loop below would fail.  This is the only instance
8125      * of not having host backing memory.
8126      */
8127     if (guest_start == TARGET_VSYSCALL_PAGE) {
8128         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8129     }
8130 #endif
8131 
8132     while (1) {
8133         IntervalTreeNode *n =
8134             interval_tree_iter_first(d->host_maps, host_start, host_start);
8135         MapInfo *mi = container_of(n, MapInfo, itree);
8136         uintptr_t this_hlast = MIN(host_last, n->last);
8137         target_ulong this_gend = h2g(this_hlast) + 1;
8138 
8139         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8140 
8141         if (this_hlast == host_last) {
8142             return 0;
8143         }
8144         host_start = this_hlast + 1;
8145         guest_start = h2g(host_start);
8146     }
8147 }
8148 
8149 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8150 {
8151     struct open_self_maps_data d = {
8152         .ts = get_task_state(env_cpu(env)),
8153         .host_maps = read_self_maps(),
8154         .fd = fd,
8155         .smaps = smaps
8156     };
8157 
8158     if (d.host_maps) {
8159         walk_memory_regions(&d, open_self_maps_2);
8160         free_self_maps(d.host_maps);
8161     } else {
8162         walk_memory_regions(&d, open_self_maps_3);
8163     }
8164     return 0;
8165 }
8166 
8167 static int open_self_maps(CPUArchState *cpu_env, int fd)
8168 {
8169     return open_self_maps_1(cpu_env, fd, false);
8170 }
8171 
8172 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8173 {
8174     return open_self_maps_1(cpu_env, fd, true);
8175 }
8176 
8177 static int open_self_stat(CPUArchState *cpu_env, int fd)
8178 {
8179     CPUState *cpu = env_cpu(cpu_env);
8180     TaskState *ts = get_task_state(cpu);
8181     g_autoptr(GString) buf = g_string_new(NULL);
8182     int i;
8183 
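    /*
     * Emit the 44 space-separated fields of /proc/self/stat.  Only pid,
     * comm, state, ppid, num_threads, starttime and startstack are
     * emulated; every other field is reported as 0.
     */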
8184     for (i = 0; i < 44; i++) {
8185         if (i == 0) {
8186             /* pid */
8187             g_string_printf(buf, FMT_pid " ", getpid());
8188         } else if (i == 1) {
8189             /* app name */
8190             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8191             bin = bin ? bin + 1 : ts->bprm->argv[0];
8192             g_string_printf(buf, "(%.15s) ", bin);
8193         } else if (i == 2) {
8194             /* task state */
8195             g_string_assign(buf, "R "); /* we are running right now */
8196         } else if (i == 3) {
8197             /* ppid */
8198             g_string_printf(buf, FMT_pid " ", getppid());
8199         } else if (i == 19) {
8200             /* num_threads */
8201             int cpus = 0;
8202             WITH_RCU_READ_LOCK_GUARD() {
8203                 CPUState *cpu_iter;
8204                 CPU_FOREACH(cpu_iter) {
8205                     cpus++;
8206                 }
8207             }
8208             g_string_printf(buf, "%d ", cpus);
8209         } else if (i == 21) {
8210             /* starttime */
8211             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8212         } else if (i == 27) {
8213             /* stack bottom */
8214             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8215         } else {
8216             /* for the rest, there is MasterCard */
8217             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8218         }
8219 
8220         if (write(fd, buf->str, buf->len) != buf->len) {
8221             return -1;
8222         }
8223     }
8224 
8225     return 0;
8226 }
8227 
8228 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8229 {
8230     CPUState *cpu = env_cpu(cpu_env);
8231     TaskState *ts = get_task_state(cpu);
8232     abi_ulong auxv = ts->info->saved_auxv;
8233     abi_ulong len = ts->info->auxv_len;
8234     char *ptr;
8235 
8236     /*
8237      * The auxiliary vector is stored on the target process's stack;
8238      * read the whole vector and copy it to the file.
8239      */
8240     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8241     if (ptr != NULL) {
8242         while (len > 0) {
8243             ssize_t r;
8244             r = write(fd, ptr, len);
8245             if (r <= 0) {
8246                 break;
8247             }
8248             len -= r;
8249             ptr += r;
8250         }
8251         lseek(fd, 0, SEEK_SET);
8252         unlock_user(ptr, auxv, len);
8253     }
8254 
8255     return 0;
8256 }
8257 
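/*
 * Return non-zero if FILENAME names ENTRY inside this process's own
 * /proc directory, i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 */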
8258 static int is_proc_myself(const char *filename, const char *entry)
8259 {
8260     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8261         filename += strlen("/proc/");
8262         if (!strncmp(filename, "self/", strlen("self/"))) {
8263             filename += strlen("self/");
8264         } else if (*filename >= '1' && *filename <= '9') {
8265             char myself[80];
8266             snprintf(myself, sizeof(myself), "%d/", getpid());
8267             if (!strncmp(filename, myself, strlen(myself))) {
8268                 filename += strlen(myself);
8269             } else {
8270                 return 0;
8271             }
8272         } else {
8273             return 0;
8274         }
8275         if (!strcmp(filename, entry)) {
8276             return 1;
8277         }
8278     }
8279     return 0;
8280 }
8281 
8282 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8283                       const char *fmt, int code)
8284 {
8285     if (logfile) {
8286         CPUState *cs = env_cpu(env);
8287 
8288         fprintf(logfile, fmt, code);
8289         fprintf(logfile, "Failing executable: %s\n", exec_path);
8290         cpu_dump_state(cs, logfile, 0);
8291         open_self_maps(env, fileno(logfile));
8292     }
8293 }
8294 
8295 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8296 {
8297     /* dump to console */
8298     excp_dump_file(stderr, env, fmt, code);
8299 
8300     /* dump to log file */
8301     if (qemu_log_separate()) {
8302         FILE *logfile = qemu_log_trylock();
8303 
8304         excp_dump_file(logfile, env, fmt, code);
8305         qemu_log_unlock(logfile);
8306     }
8307 }
8308 
8309 #include "target_proc.h"
8310 
8311 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8312     defined(HAVE_ARCH_PROC_CPUINFO) || \
8313     defined(HAVE_ARCH_PROC_HARDWARE)
8314 static int is_proc(const char *filename, const char *entry)
8315 {
8316     return strcmp(filename, entry) == 0;
8317 }
8318 #endif
8319 
8320 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8321 static int open_net_route(CPUArchState *cpu_env, int fd)
8322 {
8323     FILE *fp;
8324     char *line = NULL;
8325     size_t len = 0;
8326     ssize_t read;
8327 
8328     fp = fopen("/proc/net/route", "r");
8329     if (fp == NULL) {
8330         return -1;
8331     }
8332 
8333     /* read header */
8334 
8335     read = getline(&line, &len, fp);
8336     dprintf(fd, "%s", line);
8337 
8338     /* read routes */
8339 
8340     while ((read = getline(&line, &len, fp)) != -1) {
8341         char iface[16];
8342         uint32_t dest, gw, mask;
8343         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8344         int fields;
8345 
8346         fields = sscanf(line,
8347                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8348                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8349                         &mask, &mtu, &window, &irtt);
8350         if (fields != 11) {
8351             continue;
8352         }
8353         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8354                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8355                 metric, tswap32(mask), mtu, window, irtt);
8356     }
8357 
8358     free(line);
8359     fclose(fp);
8360 
8361     return 0;
8362 }
8363 #endif
8364 
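/*
 * Intercept opens of emulated /proc files.  Returns an open fd on success,
 * a negative value with errno set on failure, or -2 if the path is not one
 * we emulate, in which case the caller falls back to a real openat().
 */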
8365 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8366                               const char *fname, int flags, mode_t mode,
8367                               int openat2_resolve, bool safe)
8368 {
8369     g_autofree char *proc_name = NULL;
8370     const char *pathname;
8371     struct fake_open {
8372         const char *filename;
8373         int (*fill)(CPUArchState *cpu_env, int fd);
8374         int (*cmp)(const char *s1, const char *s2);
8375     };
8376     const struct fake_open *fake_open;
8377     static const struct fake_open fakes[] = {
8378         { "maps", open_self_maps, is_proc_myself },
8379         { "smaps", open_self_smaps, is_proc_myself },
8380         { "stat", open_self_stat, is_proc_myself },
8381         { "auxv", open_self_auxv, is_proc_myself },
8382         { "cmdline", open_self_cmdline, is_proc_myself },
8383 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8384         { "/proc/net/route", open_net_route, is_proc },
8385 #endif
8386 #if defined(HAVE_ARCH_PROC_CPUINFO)
8387         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8388 #endif
8389 #if defined(HAVE_ARCH_PROC_HARDWARE)
8390         { "/proc/hardware", open_hardware, is_proc },
8391 #endif
8392         { NULL, NULL, NULL }
8393     };
8394 
8395     /* if this is a file under /proc/, expand it to its full path */
8396     proc_name = realpath(fname, NULL);
8397     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8398         pathname = proc_name;
8399     } else {
8400         pathname = fname;
8401     }
8402 
8403     if (is_proc_myself(pathname, "exe")) {
8404         /* Honor openat2 resolve flags */
8405         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8406             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8407             errno = ELOOP;
8408             return -1;
8409         }
8410         if (safe) {
8411             return safe_openat(dirfd, exec_path, flags, mode);
8412         } else {
8413             return openat(dirfd, exec_path, flags, mode);
8414         }
8415     }
8416 
8417     for (fake_open = fakes; fake_open->filename; fake_open++) {
8418         if (fake_open->cmp(pathname, fake_open->filename)) {
8419             break;
8420         }
8421     }
8422 
8423     if (fake_open->filename) {
8424         const char *tmpdir;
8425         char filename[PATH_MAX];
8426         int fd, r;
8427 
8428         fd = memfd_create("qemu-open", 0);
8429         if (fd < 0) {
8430             if (errno != ENOSYS) {
8431                 return fd;
8432             }
8433             /* memfd_create() not available: fall back to a temporary file */
8434             tmpdir = getenv("TMPDIR");
8435             if (!tmpdir)
8436                 tmpdir = "/tmp";
8437             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8438             fd = mkstemp(filename);
8439             if (fd < 0) {
8440                 return fd;
8441             }
8442             unlink(filename);
8443         }
8444 
8445         if ((r = fake_open->fill(cpu_env, fd))) {
8446             int e = errno;
8447             close(fd);
8448             errno = e;
8449             return r;
8450         }
8451         lseek(fd, 0, SEEK_SET);
8452 
8453         return fd;
8454     }
8455 
8456     return -2;
8457 }
8458 
8459 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8460                     int flags, mode_t mode, bool safe)
8461 {
8462     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8463     if (fd > -2) {
8464         return fd;
8465     }
8466 
8467     if (safe) {
8468         return safe_openat(dirfd, path(pathname), flags, mode);
8469     } else {
8470         return openat(dirfd, path(pathname), flags, mode);
8471     }
8472 }
8473 
8474 
8475 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8476                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8477                       abi_ulong guest_size)
8478 {
8479     struct open_how_ver0 how = {0};
8480     char *pathname;
8481     int ret;
8482 
8483     if (guest_size < sizeof(struct target_open_how_ver0)) {
8484         return -TARGET_EINVAL;
8485     }
8486     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8487     if (ret) {
8488         if (ret == -TARGET_E2BIG) {
8489             qemu_log_mask(LOG_UNIMP,
8490                           "Unimplemented openat2 open_how size: "
8491                           TARGET_ABI_FMT_lu "\n", guest_size);
8492         }
8493         return ret;
8494     }
8495     pathname = lock_user_string(guest_pathname);
8496     if (!pathname) {
8497         return -TARGET_EFAULT;
8498     }
8499 
8500     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8501     how.mode = tswap64(how.mode);
8502     how.resolve = tswap64(how.resolve);
8503     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8504                                 how.resolve, true);
8505     if (fd > -2) {
8506         ret = get_errno(fd);
8507     } else {
8508         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8509                                      sizeof(struct open_how_ver0)));
8510     }
8511 
8512     fd_trans_unregister(ret);
8513     unlock_user(pathname, guest_pathname, 0);
8514     return ret;
8515 }
8516 
8517 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8518 {
8519     ssize_t ret;
8520 
8521     if (!pathname || !buf) {
8522         errno = EFAULT;
8523         return -1;
8524     }
8525 
8526     if (!bufsiz) {
8527         /* Short circuit this for the magic exe check. */
8528         errno = EINVAL;
8529         return -1;
8530     }
8531 
8532     if (is_proc_myself((const char *)pathname, "exe")) {
8533         /*
8534          * Don't worry about sign mismatch as earlier mapping
8535          * logic would have thrown a bad address error.
8536          */
8537         ret = MIN(strlen(exec_path), bufsiz);
8538         /* We cannot NUL terminate the string. */
8539         memcpy(buf, exec_path, ret);
8540     } else {
8541         ret = readlink(path(pathname), buf, bufsiz);
8542     }
8543 
8544     return ret;
8545 }
8546 
8547 static int do_execv(CPUArchState *cpu_env, int dirfd,
8548                     abi_long pathname, abi_long guest_argp,
8549                     abi_long guest_envp, int flags, bool is_execveat)
8550 {
8551     int ret;
8552     char **argp, **envp;
8553     int argc, envc;
8554     abi_ulong gp;
8555     abi_ulong addr;
8556     char **q;
8557     void *p;
8558 
8559     argc = 0;
8560 
8561     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8562         if (get_user_ual(addr, gp)) {
8563             return -TARGET_EFAULT;
8564         }
8565         if (!addr) {
8566             break;
8567         }
8568         argc++;
8569     }
8570     envc = 0;
8571     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8572         if (get_user_ual(addr, gp)) {
8573             return -TARGET_EFAULT;
8574         }
8575         if (!addr) {
8576             break;
8577         }
8578         envc++;
8579     }
8580 
8581     argp = g_new0(char *, argc + 1);
8582     envp = g_new0(char *, envc + 1);
8583 
8584     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8585         if (get_user_ual(addr, gp)) {
8586             goto execve_efault;
8587         }
8588         if (!addr) {
8589             break;
8590         }
8591         *q = lock_user_string(addr);
8592         if (!*q) {
8593             goto execve_efault;
8594         }
8595     }
8596     *q = NULL;
8597 
8598     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8599         if (get_user_ual(addr, gp)) {
8600             goto execve_efault;
8601         }
8602         if (!addr) {
8603             break;
8604         }
8605         *q = lock_user_string(addr);
8606         if (!*q) {
8607             goto execve_efault;
8608         }
8609     }
8610     *q = NULL;
8611 
8612     /*
8613      * Although execve() is not an interruptible syscall it is
8614      * a special case where we must use the safe_syscall wrapper:
8615      * if we allow a signal to happen before we make the host
8616      * syscall then we will 'lose' it, because at the point of
8617      * execve the process leaves QEMU's control. So we use the
8618      * safe syscall wrapper to ensure that we either take the
8619      * signal as a guest signal, or else it does not happen
8620      * before the execve completes and makes it the other
8621      * program's problem.
8622      */
8623     p = lock_user_string(pathname);
8624     if (!p) {
8625         goto execve_efault;
8626     }
8627 
8628     const char *exe = p;
8629     if (is_proc_myself(p, "exe")) {
8630         exe = exec_path;
8631     }
8632     ret = is_execveat
8633         ? safe_execveat(dirfd, exe, argp, envp, flags)
8634         : safe_execve(exe, argp, envp);
8635     ret = get_errno(ret);
8636 
8637     unlock_user(p, pathname, 0);
8638 
8639     goto execve_end;
8640 
8641 execve_efault:
8642     ret = -TARGET_EFAULT;
8643 
8644 execve_end:
8645     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8646         if (get_user_ual(addr, gp) || !addr) {
8647             break;
8648         }
8649         unlock_user(*q, addr, 0);
8650     }
8651     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8652         if (get_user_ual(addr, gp) || !addr) {
8653             break;
8654         }
8655         unlock_user(*q, addr, 0);
8656     }
8657 
8658     g_free(argp);
8659     g_free(envp);
8660     return ret;
8661 }
8662 
8663 #define TIMER_MAGIC 0x0caf0000
8664 #define TIMER_MAGIC_MASK 0xffff0000
8665 
8666 /* Convert a QEMU-provided timer ID back to the internal 16-bit index */
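/*
 * For example, a guest-visible ID of TIMER_MAGIC | 2 (0x0caf0002) yields
 * index 2, while an ID whose top 16 bits are not TIMER_MAGIC is rejected
 * with -TARGET_EINVAL.
 */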
8667 static target_timer_t get_timer_id(abi_long arg)
8668 {
8669     target_timer_t timerid = arg;
8670 
8671     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8672         return -TARGET_EINVAL;
8673     }
8674 
8675     timerid &= 0xffff;
8676 
8677     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8678         return -TARGET_EINVAL;
8679     }
8680 
8681     return timerid;
8682 }
8683 
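/*
 * Copy a guest CPU affinity mask (an array of abi_ulong words) into a host
 * mask of unsigned long words, bit by bit.  For example, with a 32-bit
 * guest on a 64-bit host, target word 1 bit 3 is overall bit 35 and so
 * lands in host word 0, bit 35.
 */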
8684 static int target_to_host_cpu_mask(unsigned long *host_mask,
8685                                    size_t host_size,
8686                                    abi_ulong target_addr,
8687                                    size_t target_size)
8688 {
8689     unsigned target_bits = sizeof(abi_ulong) * 8;
8690     unsigned host_bits = sizeof(*host_mask) * 8;
8691     abi_ulong *target_mask;
8692     unsigned i, j;
8693 
8694     assert(host_size >= target_size);
8695 
8696     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8697     if (!target_mask) {
8698         return -TARGET_EFAULT;
8699     }
8700     memset(host_mask, 0, host_size);
8701 
8702     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8703         unsigned bit = i * target_bits;
8704         abi_ulong val;
8705 
8706         __get_user(val, &target_mask[i]);
8707         for (j = 0; j < target_bits; j++, bit++) {
8708             if (val & (1UL << j)) {
8709                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8710             }
8711         }
8712     }
8713 
8714     unlock_user(target_mask, target_addr, 0);
8715     return 0;
8716 }
8717 
8718 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8719                                    size_t host_size,
8720                                    abi_ulong target_addr,
8721                                    size_t target_size)
8722 {
8723     unsigned target_bits = sizeof(abi_ulong) * 8;
8724     unsigned host_bits = sizeof(*host_mask) * 8;
8725     abi_ulong *target_mask;
8726     unsigned i, j;
8727 
8728     assert(host_size >= target_size);
8729 
8730     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8731     if (!target_mask) {
8732         return -TARGET_EFAULT;
8733     }
8734 
8735     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8736         unsigned bit = i * target_bits;
8737         abi_ulong val = 0;
8738 
8739         for (j = 0; j < target_bits; j++, bit++) {
8740             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8741                 val |= 1UL << j;
8742             }
8743         }
8744         __put_user(val, &target_mask[i]);
8745     }
8746 
8747     unlock_user(target_mask, target_addr, target_size);
8748     return 0;
8749 }
8750 
8751 #ifdef TARGET_NR_getdents
8752 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8753 {
8754     g_autofree void *hdirp = NULL;
8755     void *tdirp;
8756     int hlen, hoff, toff;
8757     int hreclen, treclen;
8758     off_t prev_diroff = 0;
8759 
8760     hdirp = g_try_malloc(count);
8761     if (!hdirp) {
8762         return -TARGET_ENOMEM;
8763     }
8764 
8765 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8766     hlen = sys_getdents(dirfd, hdirp, count);
8767 #else
8768     hlen = sys_getdents64(dirfd, hdirp, count);
8769 #endif
8770 
8771     hlen = get_errno(hlen);
8772     if (is_error(hlen)) {
8773         return hlen;
8774     }
8775 
8776     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8777     if (!tdirp) {
8778         return -TARGET_EFAULT;
8779     }
8780 
8781     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8782 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8783         struct linux_dirent *hde = hdirp + hoff;
8784 #else
8785         struct linux_dirent64 *hde = hdirp + hoff;
8786 #endif
8787         struct target_dirent *tde = tdirp + toff;
8788         int namelen;
8789         uint8_t type;
8790 
8791         namelen = strlen(hde->d_name);
8792         hreclen = hde->d_reclen;
8793         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8794         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
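        /*
         * treclen accounts for the name's trailing NUL plus one extra byte
         * at the end of the record, which carries d_type (stored below).
         */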
8795 
8796         if (toff + treclen > count) {
8797             /*
8798              * If the host struct is smaller than the target struct, or
8799              * requires less alignment and thus packs into less space,
8800              * then the host can return more entries than we can pass
8801              * on to the guest.
8802              */
8803             if (toff == 0) {
8804                 toff = -TARGET_EINVAL; /* result buffer is too small */
8805                 break;
8806             }
8807             /*
8808              * Return what we have, resetting the file pointer to the
8809              * location of the first record not returned.
8810              */
8811             lseek(dirfd, prev_diroff, SEEK_SET);
8812             break;
8813         }
8814 
8815         prev_diroff = hde->d_off;
8816         tde->d_ino = tswapal(hde->d_ino);
8817         tde->d_off = tswapal(hde->d_off);
8818         tde->d_reclen = tswap16(treclen);
8819         memcpy(tde->d_name, hde->d_name, namelen + 1);
8820 
8821         /*
8822          * The getdents type is in what was formerly a padding byte at the
8823          * end of the structure.
8824          */
8825 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8826         type = *((uint8_t *)hde + hreclen - 1);
8827 #else
8828         type = hde->d_type;
8829 #endif
8830         *((uint8_t *)tde + treclen - 1) = type;
8831     }
8832 
8833     unlock_user(tdirp, arg2, toff);
8834     return toff;
8835 }
8836 #endif /* TARGET_NR_getdents */
8837 
8838 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8839 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8840 {
8841     g_autofree void *hdirp = NULL;
8842     void *tdirp;
8843     int hlen, hoff, toff;
8844     int hreclen, treclen;
8845     off_t prev_diroff = 0;
8846 
8847     hdirp = g_try_malloc(count);
8848     if (!hdirp) {
8849         return -TARGET_ENOMEM;
8850     }
8851 
8852     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8853     if (is_error(hlen)) {
8854         return hlen;
8855     }
8856 
8857     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8858     if (!tdirp) {
8859         return -TARGET_EFAULT;
8860     }
8861 
8862     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8863         struct linux_dirent64 *hde = hdirp + hoff;
8864         struct target_dirent64 *tde = tdirp + toff;
8865         int namelen;
8866 
8867         namelen = strlen(hde->d_name) + 1;
8868         hreclen = hde->d_reclen;
8869         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8870         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8871 
8872         if (toff + treclen > count) {
8873             /*
8874              * If the host struct is smaller than the target struct, or
8875              * requires less alignment and thus packs into less space,
8876              * then the host can return more entries than we can pass
8877              * on to the guest.
8878              */
8879             if (toff == 0) {
8880                 toff = -TARGET_EINVAL; /* result buffer is too small */
8881                 break;
8882             }
8883             /*
8884              * Return what we have, resetting the file pointer to the
8885              * location of the first record not returned.
8886              */
8887             lseek(dirfd, prev_diroff, SEEK_SET);
8888             break;
8889         }
8890 
8891         prev_diroff = hde->d_off;
8892         tde->d_ino = tswap64(hde->d_ino);
8893         tde->d_off = tswap64(hde->d_off);
8894         tde->d_reclen = tswap16(treclen);
8895         tde->d_type = hde->d_type;
8896         memcpy(tde->d_name, hde->d_name, namelen);
8897     }
8898 
8899     unlock_user(tdirp, arg2, toff);
8900     return toff;
8901 }
8902 #endif /* TARGET_NR_getdents64 */
8903 
8904 #if defined(TARGET_NR_riscv_hwprobe)
8905 
8906 #define RISCV_HWPROBE_KEY_MVENDORID     0
8907 #define RISCV_HWPROBE_KEY_MARCHID       1
8908 #define RISCV_HWPROBE_KEY_MIMPID        2
8909 
8910 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8911 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8912 
8913 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8914 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8915 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8916 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8917 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8918 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8919 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8920 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8921 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8922 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8923 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8924 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8925 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8926 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8927 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8928 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8929 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8930 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8931 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8932 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8933 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8934 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8935 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8936 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8937 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8938 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8939 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8940 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8941 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8942 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8943 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8944 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8945 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8946 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8947 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8948 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8949 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8950 
8951 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8952 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8953 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8954 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8955 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8956 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8957 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8958 
8959 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8960 
8961 struct riscv_hwprobe {
8962     abi_llong  key;
8963     abi_ullong value;
8964 };
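/*
 * The key/value constants above and struct riscv_hwprobe mirror the Linux
 * riscv_hwprobe(2) user ABI; see the RISC-V hwprobe documentation in the
 * kernel source tree.
 */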
8965 
8966 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8967                                     struct riscv_hwprobe *pair,
8968                                     size_t pair_count)
8969 {
8970     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8971 
8972     for (; pair_count > 0; pair_count--, pair++) {
8973         abi_llong key;
8974         abi_ullong value;
8975         __put_user(0, &pair->value);
8976         __get_user(key, &pair->key);
8977         switch (key) {
8978         case RISCV_HWPROBE_KEY_MVENDORID:
8979             __put_user(cfg->mvendorid, &pair->value);
8980             break;
8981         case RISCV_HWPROBE_KEY_MARCHID:
8982             __put_user(cfg->marchid, &pair->value);
8983             break;
8984         case RISCV_HWPROBE_KEY_MIMPID:
8985             __put_user(cfg->mimpid, &pair->value);
8986             break;
8987         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8988             value = riscv_has_ext(env, RVI) &&
8989                     riscv_has_ext(env, RVM) &&
8990                     riscv_has_ext(env, RVA) ?
8991                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8992             __put_user(value, &pair->value);
8993             break;
8994         case RISCV_HWPROBE_KEY_IMA_EXT_0:
8995             value = riscv_has_ext(env, RVF) &&
8996                     riscv_has_ext(env, RVD) ?
8997                     RISCV_HWPROBE_IMA_FD : 0;
8998             value |= riscv_has_ext(env, RVC) ?
8999                      RISCV_HWPROBE_IMA_C : 0;
9000             value |= riscv_has_ext(env, RVV) ?
9001                      RISCV_HWPROBE_IMA_V : 0;
9002             value |= cfg->ext_zba ?
9003                      RISCV_HWPROBE_EXT_ZBA : 0;
9004             value |= cfg->ext_zbb ?
9005                      RISCV_HWPROBE_EXT_ZBB : 0;
9006             value |= cfg->ext_zbs ?
9007                      RISCV_HWPROBE_EXT_ZBS : 0;
9008             value |= cfg->ext_zicboz ?
9009                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9010             value |= cfg->ext_zbc ?
9011                      RISCV_HWPROBE_EXT_ZBC : 0;
9012             value |= cfg->ext_zbkb ?
9013                      RISCV_HWPROBE_EXT_ZBKB : 0;
9014             value |= cfg->ext_zbkc ?
9015                      RISCV_HWPROBE_EXT_ZBKC : 0;
9016             value |= cfg->ext_zbkx ?
9017                      RISCV_HWPROBE_EXT_ZBKX : 0;
9018             value |= cfg->ext_zknd ?
9019                      RISCV_HWPROBE_EXT_ZKND : 0;
9020             value |= cfg->ext_zkne ?
9021                      RISCV_HWPROBE_EXT_ZKNE : 0;
9022             value |= cfg->ext_zknh ?
9023                      RISCV_HWPROBE_EXT_ZKNH : 0;
9024             value |= cfg->ext_zksed ?
9025                      RISCV_HWPROBE_EXT_ZKSED : 0;
9026             value |= cfg->ext_zksh ?
9027                      RISCV_HWPROBE_EXT_ZKSH : 0;
9028             value |= cfg->ext_zkt ?
9029                      RISCV_HWPROBE_EXT_ZKT : 0;
9030             value |= cfg->ext_zvbb ?
9031                      RISCV_HWPROBE_EXT_ZVBB : 0;
9032             value |= cfg->ext_zvbc ?
9033                      RISCV_HWPROBE_EXT_ZVBC : 0;
9034             value |= cfg->ext_zvkb ?
9035                      RISCV_HWPROBE_EXT_ZVKB : 0;
9036             value |= cfg->ext_zvkg ?
9037                      RISCV_HWPROBE_EXT_ZVKG : 0;
9038             value |= cfg->ext_zvkned ?
9039                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9040             value |= cfg->ext_zvknha ?
9041                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9042             value |= cfg->ext_zvknhb ?
9043                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9044             value |= cfg->ext_zvksed ?
9045                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9046             value |= cfg->ext_zvksh ?
9047                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9048             value |= cfg->ext_zvkt ?
9049                      RISCV_HWPROBE_EXT_ZVKT : 0;
9050             value |= cfg->ext_zfh ?
9051                      RISCV_HWPROBE_EXT_ZFH : 0;
9052             value |= cfg->ext_zfhmin ?
9053                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9054             value |= cfg->ext_zihintntl ?
9055                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9056             value |= cfg->ext_zvfh ?
9057                      RISCV_HWPROBE_EXT_ZVFH : 0;
9058             value |= cfg->ext_zvfhmin ?
9059                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9060             value |= cfg->ext_zfa ?
9061                      RISCV_HWPROBE_EXT_ZFA : 0;
9062             value |= cfg->ext_ztso ?
9063                      RISCV_HWPROBE_EXT_ZTSO : 0;
9064             value |= cfg->ext_zacas ?
9065                      RISCV_HWPROBE_EXT_ZACAS : 0;
9066             value |= cfg->ext_zicond ?
9067                      RISCV_HWPROBE_EXT_ZICOND : 0;
9068             __put_user(value, &pair->value);
9069             break;
9070         case RISCV_HWPROBE_KEY_CPUPERF_0:
9071             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9072             break;
9073         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9074             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9075             __put_user(value, &pair->value);
9076             break;
9077         default:
9078             __put_user(-1, &pair->key);
9079             break;
9080         }
9081     }
9082 }
9083 
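/*
 * Validate the riscv_hwprobe cpu set arguments: arg3 is the CPU count and
 * arg4 the guest address of the mask.  Succeed only if at least one CPU
 * bit is set; an all-zero mask yields -TARGET_EINVAL.
 */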
9084 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9085 {
9086     int ret, i, tmp;
9087     size_t host_mask_size, target_mask_size;
9088     unsigned long *host_mask;
9089 
9090     /*
9091      * cpu_set_t represents CPU masks as bit masks of type unsigned long *.
9092      * arg3 contains the cpu count.
9093      */
9094     tmp = (8 * sizeof(abi_ulong));
9095     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9096     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9097                      ~(sizeof(*host_mask) - 1);
9098 
9099     host_mask = alloca(host_mask_size);
9100 
9101     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9102                                   arg4, target_mask_size);
9103     if (ret != 0) {
9104         return ret;
9105     }
9106 
9107     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9108         if (host_mask[i] != 0) {
9109             return 0;
9110         }
9111     }
9112     return -TARGET_EINVAL;
9113 }
9114 
9115 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9116                                  abi_long arg2, abi_long arg3,
9117                                  abi_long arg4, abi_long arg5)
9118 {
9119     int ret;
9120     struct riscv_hwprobe *host_pairs;
9121 
9122     /* flags must be 0 */
9123     if (arg5 != 0) {
9124         return -TARGET_EINVAL;
9125     }
9126 
9127     /* check cpu_set */
9128     if (arg3 != 0) {
9129         ret = cpu_set_valid(arg3, arg4);
9130         if (ret != 0) {
9131             return ret;
9132         }
9133     } else if (arg4 != 0) {
9134         return -TARGET_EINVAL;
9135     }
9136 
9137     /* no pairs */
9138     if (arg2 == 0) {
9139         return 0;
9140     }
9141 
9142     host_pairs = lock_user(VERIFY_WRITE, arg1,
9143                            sizeof(*host_pairs) * (size_t)arg2, 0);
9144     if (host_pairs == NULL) {
9145         return -TARGET_EFAULT;
9146     }
9147     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9148     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9149     return 0;
9150 }
9151 #endif /* TARGET_NR_riscv_hwprobe */
9152 
9153 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9154 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9155 #endif
9156 
9157 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9158 #define __NR_sys_open_tree __NR_open_tree
9159 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9160           unsigned int, __flags)
9161 #endif
9162 
9163 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9164 #define __NR_sys_move_mount __NR_move_mount
9165 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9166            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9167 #endif
9168 
9169 /* This is an internal helper for do_syscall that provides a single
9170  * return point, so that actions such as logging of syscall results
9171  * can be performed in one place.
9172  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9173  */
9174 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9175                             abi_long arg2, abi_long arg3, abi_long arg4,
9176                             abi_long arg5, abi_long arg6, abi_long arg7,
9177                             abi_long arg8)
9178 {
9179     CPUState *cpu = env_cpu(cpu_env);
9180     abi_long ret;
9181 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9182     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9183     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9184     || defined(TARGET_NR_statx)
9185     struct stat st;
9186 #endif
9187 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9188     || defined(TARGET_NR_fstatfs)
9189     struct statfs stfs;
9190 #endif
9191     void *p;
9192 
9193     switch(num) {
9194     case TARGET_NR_exit:
9195         /* In old applications this may be used to implement _exit(2).
9196            However, in threaded applications it is used for thread termination,
9197            and _exit_group is used for application termination.
9198            Do thread termination if we have more than one thread.  */
9199 
9200         if (block_signals()) {
9201             return -QEMU_ERESTARTSYS;
9202         }
9203 
9204         pthread_mutex_lock(&clone_lock);
9205 
9206         if (CPU_NEXT(first_cpu)) {
9207             TaskState *ts = get_task_state(cpu);
9208 
9209             if (ts->child_tidptr) {
9210                 put_user_u32(0, ts->child_tidptr);
9211                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9212                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9213             }
9214 
9215             object_unparent(OBJECT(cpu));
9216             object_unref(OBJECT(cpu));
9217             /*
9218              * At this point the CPU should be unrealized and removed
9219              * from cpu lists. We can clean-up the rest of the thread
9220              * data without the lock held.
9221              */
9222 
9223             pthread_mutex_unlock(&clone_lock);
9224 
9225             thread_cpu = NULL;
9226             g_free(ts);
9227             rcu_unregister_thread();
9228             pthread_exit(NULL);
9229         }
9230 
9231         pthread_mutex_unlock(&clone_lock);
9232         preexit_cleanup(cpu_env, arg1);
9233         _exit(arg1);
9234         return 0; /* avoid warning */
9235     case TARGET_NR_read:
9236         if (arg2 == 0 && arg3 == 0) {
9237             return get_errno(safe_read(arg1, 0, 0));
9238         } else {
9239             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9240                 return -TARGET_EFAULT;
9241             ret = get_errno(safe_read(arg1, p, arg3));
9242             if (ret >= 0 &&
9243                 fd_trans_host_to_target_data(arg1)) {
9244                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9245             }
9246             unlock_user(p, arg2, ret);
9247         }
9248         return ret;
9249     case TARGET_NR_write:
9250         if (arg2 == 0 && arg3 == 0) {
9251             return get_errno(safe_write(arg1, 0, 0));
9252         }
9253         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9254             return -TARGET_EFAULT;
9255         if (fd_trans_target_to_host_data(arg1)) {
9256             void *copy = g_malloc(arg3);
9257             memcpy(copy, p, arg3);
9258             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9259             if (ret >= 0) {
9260                 ret = get_errno(safe_write(arg1, copy, ret));
9261             }
9262             g_free(copy);
9263         } else {
9264             ret = get_errno(safe_write(arg1, p, arg3));
9265         }
9266         unlock_user(p, arg2, 0);
9267         return ret;
9268 
9269 #ifdef TARGET_NR_open
9270     case TARGET_NR_open:
9271         if (!(p = lock_user_string(arg1)))
9272             return -TARGET_EFAULT;
9273         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9274                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9275                                   arg3, true));
9276         fd_trans_unregister(ret);
9277         unlock_user(p, arg1, 0);
9278         return ret;
9279 #endif
9280     case TARGET_NR_openat:
9281         if (!(p = lock_user_string(arg2)))
9282             return -TARGET_EFAULT;
9283         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9284                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9285                                   arg4, true));
9286         fd_trans_unregister(ret);
9287         unlock_user(p, arg2, 0);
9288         return ret;
9289     case TARGET_NR_openat2:
9290         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9291         return ret;
9292 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9293     case TARGET_NR_name_to_handle_at:
9294         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9295         return ret;
9296 #endif
9297 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9298     case TARGET_NR_open_by_handle_at:
9299         ret = do_open_by_handle_at(arg1, arg2, arg3);
9300         fd_trans_unregister(ret);
9301         return ret;
9302 #endif
9303 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9304     case TARGET_NR_pidfd_open:
9305         return get_errno(pidfd_open(arg1, arg2));
9306 #endif
9307 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9308     case TARGET_NR_pidfd_send_signal:
9309         {
9310             siginfo_t uinfo, *puinfo;
9311 
9312             if (arg3) {
9313                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9314                 if (!p) {
9315                     return -TARGET_EFAULT;
9316                 }
9317                 target_to_host_siginfo(&uinfo, p);
9318                 unlock_user(p, arg3, 0);
9319                 puinfo = &uinfo;
9320             } else {
9321                 puinfo = NULL;
9322             }
9323             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9324                                               puinfo, arg4));
9325         }
9326         return ret;
9327 #endif
9328 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9329     case TARGET_NR_pidfd_getfd:
9330         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9331 #endif
9332     case TARGET_NR_close:
9333         fd_trans_unregister(arg1);
9334         return get_errno(close(arg1));
9335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9336     case TARGET_NR_close_range:
9337         ret = get_errno(sys_close_range(arg1, arg2, arg3));
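        /*
         * Note (added for clarity): fd translators are dropped only when the
         * descriptors were actually closed; with CLOSE_RANGE_CLOEXEC the fds
         * stay open and merely become close-on-exec, so they are kept.
         */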
9338         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9339             abi_long fd, maxfd;
9340             maxfd = MIN(arg2, target_fd_max);
9341             for (fd = arg1; fd < maxfd; fd++) {
9342                 fd_trans_unregister(fd);
9343             }
9344         }
9345         return ret;
9346 #endif
9347 
9348     case TARGET_NR_brk:
9349         return do_brk(arg1);
9350 #ifdef TARGET_NR_fork
9351     case TARGET_NR_fork:
9352         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9353 #endif
9354 #ifdef TARGET_NR_waitpid
9355     case TARGET_NR_waitpid:
9356         {
9357             int status;
9358             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9359             if (!is_error(ret) && arg2 && ret
9360                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9361                 return -TARGET_EFAULT;
9362         }
9363         return ret;
9364 #endif
9365 #ifdef TARGET_NR_waitid
9366     case TARGET_NR_waitid:
9367         {
9368             struct rusage ru;
9369             siginfo_t info;
9370 
9371             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9372                                         arg4, (arg5 ? &ru : NULL)));
9373             if (!is_error(ret)) {
9374                 if (arg3) {
9375                     p = lock_user(VERIFY_WRITE, arg3,
9376                                   sizeof(target_siginfo_t), 0);
9377                     if (!p) {
9378                         return -TARGET_EFAULT;
9379                     }
9380                     host_to_target_siginfo(p, &info);
9381                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9382                 }
9383                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9384                     return -TARGET_EFAULT;
9385                 }
9386             }
9387         }
9388         return ret;
9389 #endif
9390 #ifdef TARGET_NR_creat /* not on alpha */
9391     case TARGET_NR_creat:
9392         if (!(p = lock_user_string(arg1)))
9393             return -TARGET_EFAULT;
9394         ret = get_errno(creat(p, arg2));
9395         fd_trans_unregister(ret);
9396         unlock_user(p, arg1, 0);
9397         return ret;
9398 #endif
9399 #ifdef TARGET_NR_link
9400     case TARGET_NR_link:
9401         {
9402             void * p2;
9403             p = lock_user_string(arg1);
9404             p2 = lock_user_string(arg2);
9405             if (!p || !p2)
9406                 ret = -TARGET_EFAULT;
9407             else
9408                 ret = get_errno(link(p, p2));
9409             unlock_user(p2, arg2, 0);
9410             unlock_user(p, arg1, 0);
9411         }
9412         return ret;
9413 #endif
9414 #if defined(TARGET_NR_linkat)
9415     case TARGET_NR_linkat:
9416         {
9417             void * p2 = NULL;
9418             if (!arg2 || !arg4)
9419                 return -TARGET_EFAULT;
9420             p  = lock_user_string(arg2);
9421             p2 = lock_user_string(arg4);
9422             if (!p || !p2)
9423                 ret = -TARGET_EFAULT;
9424             else
9425                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9426             unlock_user(p, arg2, 0);
9427             unlock_user(p2, arg4, 0);
9428         }
9429         return ret;
9430 #endif
9431 #ifdef TARGET_NR_unlink
9432     case TARGET_NR_unlink:
9433         if (!(p = lock_user_string(arg1)))
9434             return -TARGET_EFAULT;
9435         ret = get_errno(unlink(p));
9436         unlock_user(p, arg1, 0);
9437         return ret;
9438 #endif
9439 #if defined(TARGET_NR_unlinkat)
9440     case TARGET_NR_unlinkat:
9441         if (!(p = lock_user_string(arg2)))
9442             return -TARGET_EFAULT;
9443         ret = get_errno(unlinkat(arg1, p, arg3));
9444         unlock_user(p, arg2, 0);
9445         return ret;
9446 #endif
9447     case TARGET_NR_execveat:
9448         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9449     case TARGET_NR_execve:
9450         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9451     case TARGET_NR_chdir:
9452         if (!(p = lock_user_string(arg1)))
9453             return -TARGET_EFAULT;
9454         ret = get_errno(chdir(p));
9455         unlock_user(p, arg1, 0);
9456         return ret;
9457 #ifdef TARGET_NR_time
9458     case TARGET_NR_time:
9459         {
9460             time_t host_time;
9461             ret = get_errno(time(&host_time));
9462             if (!is_error(ret)
9463                 && arg1
9464                 && put_user_sal(host_time, arg1))
9465                 return -TARGET_EFAULT;
9466         }
9467         return ret;
9468 #endif
9469 #ifdef TARGET_NR_mknod
9470     case TARGET_NR_mknod:
9471         if (!(p = lock_user_string(arg1)))
9472             return -TARGET_EFAULT;
9473         ret = get_errno(mknod(p, arg2, arg3));
9474         unlock_user(p, arg1, 0);
9475         return ret;
9476 #endif
9477 #if defined(TARGET_NR_mknodat)
9478     case TARGET_NR_mknodat:
9479         if (!(p = lock_user_string(arg2)))
9480             return -TARGET_EFAULT;
9481         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9482         unlock_user(p, arg2, 0);
9483         return ret;
9484 #endif
9485 #ifdef TARGET_NR_chmod
9486     case TARGET_NR_chmod:
9487         if (!(p = lock_user_string(arg1)))
9488             return -TARGET_EFAULT;
9489         ret = get_errno(chmod(p, arg2));
9490         unlock_user(p, arg1, 0);
9491         return ret;
9492 #endif
9493 #ifdef TARGET_NR_lseek
9494     case TARGET_NR_lseek:
9495         return get_errno(lseek(arg1, arg2, arg3));
9496 #endif
9497 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9498     /* Alpha specific */
9499     case TARGET_NR_getxpid:
9500         cpu_env->ir[IR_A4] = getppid();
9501         return get_errno(getpid());
9502 #endif
9503 #ifdef TARGET_NR_getpid
9504     case TARGET_NR_getpid:
9505         return get_errno(getpid());
9506 #endif
9507     case TARGET_NR_mount:
9508         {
9509             /* need to look at the data field */
9510             void *p2, *p3;
9511 
9512             if (arg1) {
9513                 p = lock_user_string(arg1);
9514                 if (!p) {
9515                     return -TARGET_EFAULT;
9516                 }
9517             } else {
9518                 p = NULL;
9519             }
9520 
9521             p2 = lock_user_string(arg2);
9522             if (!p2) {
9523                 if (arg1) {
9524                     unlock_user(p, arg1, 0);
9525                 }
9526                 return -TARGET_EFAULT;
9527             }
9528 
9529             if (arg3) {
9530                 p3 = lock_user_string(arg3);
9531                 if (!p3) {
9532                     if (arg1) {
9533                         unlock_user(p, arg1, 0);
9534                     }
9535                     unlock_user(p2, arg2, 0);
9536                     return -TARGET_EFAULT;
9537                 }
9538             } else {
9539                 p3 = NULL;
9540             }
9541 
9542             /* FIXME - arg5 should be locked, but it isn't clear how to
9543              * do that since it's not guaranteed to be a NULL-terminated
9544              * string.
9545              */
9546             if (!arg5) {
9547                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9548             } else {
9549                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9550             }
9551             ret = get_errno(ret);
9552 
9553             if (arg1) {
9554                 unlock_user(p, arg1, 0);
9555             }
9556             unlock_user(p2, arg2, 0);
9557             if (arg3) {
9558                 unlock_user(p3, arg3, 0);
9559             }
9560         }
9561         return ret;
9562 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9563 #if defined(TARGET_NR_umount)
9564     case TARGET_NR_umount:
9565 #endif
9566 #if defined(TARGET_NR_oldumount)
9567     case TARGET_NR_oldumount:
9568 #endif
9569         if (!(p = lock_user_string(arg1)))
9570             return -TARGET_EFAULT;
9571         ret = get_errno(umount(p));
9572         unlock_user(p, arg1, 0);
9573         return ret;
9574 #endif
9575 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9576     case TARGET_NR_move_mount:
9577         {
9578             void *p2, *p4;
9579 
9580             if (!arg2 || !arg4) {
9581                 return -TARGET_EFAULT;
9582             }
9583 
9584             p2 = lock_user_string(arg2);
9585             if (!p2) {
9586                 return -TARGET_EFAULT;
9587             }
9588 
9589             p4 = lock_user_string(arg4);
9590             if (!p4) {
9591                 unlock_user(p2, arg2, 0);
9592                 return -TARGET_EFAULT;
9593             }
9594             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9595 
9596             unlock_user(p2, arg2, 0);
9597             unlock_user(p4, arg4, 0);
9598 
9599             return ret;
9600         }
9601 #endif
9602 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9603     case TARGET_NR_open_tree:
9604         {
9605             void *p2;
9606             int host_flags;
9607 
9608             if (!arg2) {
9609                 return -TARGET_EFAULT;
9610             }
9611 
9612             p2 = lock_user_string(arg2);
9613             if (!p2) {
9614                 return -TARGET_EFAULT;
9615             }
9616 
9617             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9618             if (arg3 & TARGET_O_CLOEXEC) {
9619                 host_flags |= O_CLOEXEC;
9620             }
9621 
9622             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9623 
9624             unlock_user(p2, arg2, 0);
9625 
9626             return ret;
9627         }
9628 #endif
9629 #ifdef TARGET_NR_stime /* not on alpha */
9630     case TARGET_NR_stime:
9631         {
9632             struct timespec ts;
9633             ts.tv_nsec = 0;
9634             if (get_user_sal(ts.tv_sec, arg1)) {
9635                 return -TARGET_EFAULT;
9636             }
9637             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9638         }
9639 #endif
9640 #ifdef TARGET_NR_alarm /* not on alpha */
9641     case TARGET_NR_alarm:
9642         return alarm(arg1);
9643 #endif
9644 #ifdef TARGET_NR_pause /* not on alpha */
9645     case TARGET_NR_pause:
9646         if (!block_signals()) {
9647             sigsuspend(&get_task_state(cpu)->signal_mask);
9648         }
9649         return -TARGET_EINTR;
9650 #endif
9651 #ifdef TARGET_NR_utime
9652     case TARGET_NR_utime:
9653         {
9654             struct utimbuf tbuf, *host_tbuf;
9655             struct target_utimbuf *target_tbuf;
9656             if (arg2) {
9657                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9658                     return -TARGET_EFAULT;
9659                 tbuf.actime = tswapal(target_tbuf->actime);
9660                 tbuf.modtime = tswapal(target_tbuf->modtime);
9661                 unlock_user_struct(target_tbuf, arg2, 0);
9662                 host_tbuf = &tbuf;
9663             } else {
9664                 host_tbuf = NULL;
9665             }
9666             if (!(p = lock_user_string(arg1)))
9667                 return -TARGET_EFAULT;
9668             ret = get_errno(utime(p, host_tbuf));
9669             unlock_user(p, arg1, 0);
9670         }
9671         return ret;
9672 #endif
9673 #ifdef TARGET_NR_utimes
9674     case TARGET_NR_utimes:
9675         {
9676             struct timeval *tvp, tv[2];
9677             if (arg2) {
9678                 if (copy_from_user_timeval(&tv[0], arg2)
9679                     || copy_from_user_timeval(&tv[1],
9680                                               arg2 + sizeof(struct target_timeval)))
9681                     return -TARGET_EFAULT;
9682                 tvp = tv;
9683             } else {
9684                 tvp = NULL;
9685             }
9686             if (!(p = lock_user_string(arg1)))
9687                 return -TARGET_EFAULT;
9688             ret = get_errno(utimes(p, tvp));
9689             unlock_user(p, arg1, 0);
9690         }
9691         return ret;
9692 #endif
9693 #if defined(TARGET_NR_futimesat)
9694     case TARGET_NR_futimesat:
9695         {
9696             struct timeval *tvp, tv[2];
9697             if (arg3) {
9698                 if (copy_from_user_timeval(&tv[0], arg3)
9699                     || copy_from_user_timeval(&tv[1],
9700                                               arg3 + sizeof(struct target_timeval)))
9701                     return -TARGET_EFAULT;
9702                 tvp = tv;
9703             } else {
9704                 tvp = NULL;
9705             }
9706             if (!(p = lock_user_string(arg2))) {
9707                 return -TARGET_EFAULT;
9708             }
9709             ret = get_errno(futimesat(arg1, path(p), tvp));
9710             unlock_user(p, arg2, 0);
9711         }
9712         return ret;
9713 #endif
9714 #ifdef TARGET_NR_access
9715     case TARGET_NR_access:
9716         if (!(p = lock_user_string(arg1))) {
9717             return -TARGET_EFAULT;
9718         }
9719         ret = get_errno(access(path(p), arg2));
9720         unlock_user(p, arg1, 0);
9721         return ret;
9722 #endif
9723 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9724     case TARGET_NR_faccessat:
9725         if (!(p = lock_user_string(arg2))) {
9726             return -TARGET_EFAULT;
9727         }
9728         ret = get_errno(faccessat(arg1, p, arg3, 0));
9729         unlock_user(p, arg2, 0);
9730         return ret;
9731 #endif
9732 #if defined(TARGET_NR_faccessat2)
9733     case TARGET_NR_faccessat2:
9734         if (!(p = lock_user_string(arg2))) {
9735             return -TARGET_EFAULT;
9736         }
9737         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9738         unlock_user(p, arg2, 0);
9739         return ret;
9740 #endif
9741 #ifdef TARGET_NR_nice /* not on alpha */
9742     case TARGET_NR_nice:
9743         return get_errno(nice(arg1));
9744 #endif
9745     case TARGET_NR_sync:
9746         sync();
9747         return 0;
9748 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9749     case TARGET_NR_syncfs:
9750         return get_errno(syncfs(arg1));
9751 #endif
9752     case TARGET_NR_kill:
9753         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9754 #ifdef TARGET_NR_rename
9755     case TARGET_NR_rename:
9756         {
9757             void *p2;
9758             p = lock_user_string(arg1);
9759             p2 = lock_user_string(arg2);
9760             if (!p || !p2)
9761                 ret = -TARGET_EFAULT;
9762             else
9763                 ret = get_errno(rename(p, p2));
9764             unlock_user(p2, arg2, 0);
9765             unlock_user(p, arg1, 0);
9766         }
9767         return ret;
9768 #endif
9769 #if defined(TARGET_NR_renameat)
9770     case TARGET_NR_renameat:
9771         {
9772             void *p2;
9773             p  = lock_user_string(arg2);
9774             p2 = lock_user_string(arg4);
9775             if (!p || !p2)
9776                 ret = -TARGET_EFAULT;
9777             else
9778                 ret = get_errno(renameat(arg1, p, arg3, p2));
9779             unlock_user(p2, arg4, 0);
9780             unlock_user(p, arg2, 0);
9781         }
9782         return ret;
9783 #endif
9784 #if defined(TARGET_NR_renameat2)
9785     case TARGET_NR_renameat2:
9786         {
9787             void *p2;
9788             p  = lock_user_string(arg2);
9789             p2 = lock_user_string(arg4);
9790             if (!p || !p2) {
9791                 ret = -TARGET_EFAULT;
9792             } else {
9793                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9794             }
9795             unlock_user(p2, arg4, 0);
9796             unlock_user(p, arg2, 0);
9797         }
9798         return ret;
9799 #endif
9800 #ifdef TARGET_NR_mkdir
9801     case TARGET_NR_mkdir:
9802         if (!(p = lock_user_string(arg1)))
9803             return -TARGET_EFAULT;
9804         ret = get_errno(mkdir(p, arg2));
9805         unlock_user(p, arg1, 0);
9806         return ret;
9807 #endif
9808 #if defined(TARGET_NR_mkdirat)
9809     case TARGET_NR_mkdirat:
9810         if (!(p = lock_user_string(arg2)))
9811             return -TARGET_EFAULT;
9812         ret = get_errno(mkdirat(arg1, p, arg3));
9813         unlock_user(p, arg2, 0);
9814         return ret;
9815 #endif
9816 #ifdef TARGET_NR_rmdir
9817     case TARGET_NR_rmdir:
9818         if (!(p = lock_user_string(arg1)))
9819             return -TARGET_EFAULT;
9820         ret = get_errno(rmdir(p));
9821         unlock_user(p, arg1, 0);
9822         return ret;
9823 #endif
9824     case TARGET_NR_dup:
9825         ret = get_errno(dup(arg1));
9826         if (ret >= 0) {
9827             fd_trans_dup(arg1, ret);
9828         }
9829         return ret;
9830 #ifdef TARGET_NR_pipe
9831     case TARGET_NR_pipe:
9832         return do_pipe(cpu_env, arg1, 0, 0);
9833 #endif
9834 #ifdef TARGET_NR_pipe2
9835     case TARGET_NR_pipe2:
9836         return do_pipe(cpu_env, arg1,
9837                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9838 #endif
9839     case TARGET_NR_times:
9840         {
9841             struct target_tms *tmsp;
9842             struct tms tms;
9843             ret = get_errno(times(&tms));
9844             if (arg1) {
9845                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9846                 if (!tmsp)
9847                     return -TARGET_EFAULT;
9848                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9849                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9850                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9851                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9852             }
9853             if (!is_error(ret))
9854                 ret = host_to_target_clock_t(ret);
9855         }
9856         return ret;
9857     case TARGET_NR_acct:
9858         if (arg1 == 0) {
9859             ret = get_errno(acct(NULL));
9860         } else {
9861             if (!(p = lock_user_string(arg1))) {
9862                 return -TARGET_EFAULT;
9863             }
9864             ret = get_errno(acct(path(p)));
9865             unlock_user(p, arg1, 0);
9866         }
9867         return ret;
9868 #ifdef TARGET_NR_umount2
9869     case TARGET_NR_umount2:
9870         if (!(p = lock_user_string(arg1)))
9871             return -TARGET_EFAULT;
9872         ret = get_errno(umount2(p, arg2));
9873         unlock_user(p, arg1, 0);
9874         return ret;
9875 #endif
9876     case TARGET_NR_ioctl:
9877         return do_ioctl(arg1, arg2, arg3);
9878 #ifdef TARGET_NR_fcntl
9879     case TARGET_NR_fcntl:
9880         return do_fcntl(arg1, arg2, arg3);
9881 #endif
9882     case TARGET_NR_setpgid:
9883         return get_errno(setpgid(arg1, arg2));
9884     case TARGET_NR_umask:
9885         return get_errno(umask(arg1));
9886     case TARGET_NR_chroot:
9887         if (!(p = lock_user_string(arg1)))
9888             return -TARGET_EFAULT;
9889         ret = get_errno(chroot(p));
9890         unlock_user(p, arg1, 0);
9891         return ret;
9892 #ifdef TARGET_NR_dup2
9893     case TARGET_NR_dup2:
9894         ret = get_errno(dup2(arg1, arg2));
9895         if (ret >= 0) {
9896             fd_trans_dup(arg1, arg2);
9897         }
9898         return ret;
9899 #endif
9900 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9901     case TARGET_NR_dup3:
9902     {
9903         int host_flags;
9904 
9905         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9906             return -TARGET_EINVAL;
9907         }
9908         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9909         ret = get_errno(dup3(arg1, arg2, host_flags));
9910         if (ret >= 0) {
9911             fd_trans_dup(arg1, arg2);
9912         }
9913         return ret;
9914     }
9915 #endif
9916 #ifdef TARGET_NR_getppid /* not on alpha */
9917     case TARGET_NR_getppid:
9918         return get_errno(getppid());
9919 #endif
9920 #ifdef TARGET_NR_getpgrp
9921     case TARGET_NR_getpgrp:
9922         return get_errno(getpgrp());
9923 #endif
9924     case TARGET_NR_setsid:
9925         return get_errno(setsid());
9926 #ifdef TARGET_NR_sigaction
9927     case TARGET_NR_sigaction:
9928         {
9929 #if defined(TARGET_MIPS)
9930             struct target_sigaction act, oact, *pact, *old_act;
9931 
9932             if (arg2) {
9933                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9934                     return -TARGET_EFAULT;
9935                 act._sa_handler = old_act->_sa_handler;
9936                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9937                 act.sa_flags = old_act->sa_flags;
9938                 unlock_user_struct(old_act, arg2, 0);
9939                 pact = &act;
9940             } else {
9941                 pact = NULL;
9942             }
9943 
9944             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9945 
9946             if (!is_error(ret) && arg3) {
9947                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9948                     return -TARGET_EFAULT;
9949                 old_act->_sa_handler = oact._sa_handler;
9950                 old_act->sa_flags = oact.sa_flags;
9951                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9952                 old_act->sa_mask.sig[1] = 0;
9953                 old_act->sa_mask.sig[2] = 0;
9954                 old_act->sa_mask.sig[3] = 0;
9955                 unlock_user_struct(old_act, arg3, 1);
9956             }
9957 #else
9958             struct target_old_sigaction *old_act;
9959             struct target_sigaction act, oact, *pact;
9960             if (arg2) {
9961                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9962                     return -TARGET_EFAULT;
9963                 act._sa_handler = old_act->_sa_handler;
9964                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9965                 act.sa_flags = old_act->sa_flags;
9966 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9967                 act.sa_restorer = old_act->sa_restorer;
9968 #endif
9969                 unlock_user_struct(old_act, arg2, 0);
9970                 pact = &act;
9971             } else {
9972                 pact = NULL;
9973             }
9974             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9975             if (!is_error(ret) && arg3) {
9976                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9977                     return -TARGET_EFAULT;
9978                 old_act->_sa_handler = oact._sa_handler;
9979                 old_act->sa_mask = oact.sa_mask.sig[0];
9980                 old_act->sa_flags = oact.sa_flags;
9981 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9982                 old_act->sa_restorer = oact.sa_restorer;
9983 #endif
9984                 unlock_user_struct(old_act, arg3, 1);
9985             }
9986 #endif
9987         }
9988         return ret;
9989 #endif
9990     case TARGET_NR_rt_sigaction:
9991         {
9992             /*
9993              * For Alpha and SPARC this is a 5 argument syscall, with
9994              * a 'restorer' parameter which must be copied into the
9995              * sa_restorer field of the sigaction struct.
9996              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9997              * and arg5 is the sigsetsize.
9998              */
9999 #if defined(TARGET_ALPHA)
10000             target_ulong sigsetsize = arg4;
10001             target_ulong restorer = arg5;
10002 #elif defined(TARGET_SPARC)
10003             target_ulong restorer = arg4;
10004             target_ulong sigsetsize = arg5;
10005 #else
10006             target_ulong sigsetsize = arg4;
10007             target_ulong restorer = 0;
10008 #endif
10009             struct target_sigaction *act = NULL;
10010             struct target_sigaction *oact = NULL;
10011 
10012             if (sigsetsize != sizeof(target_sigset_t)) {
10013                 return -TARGET_EINVAL;
10014             }
10015             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10016                 return -TARGET_EFAULT;
10017             }
10018             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10019                 ret = -TARGET_EFAULT;
10020             } else {
10021                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10022                 if (oact) {
10023                     unlock_user_struct(oact, arg3, 1);
10024                 }
10025             }
10026             if (act) {
10027                 unlock_user_struct(act, arg2, 0);
10028             }
10029         }
10030         return ret;
10031 #ifdef TARGET_NR_sgetmask /* not on alpha */
10032     case TARGET_NR_sgetmask:
10033         {
10034             sigset_t cur_set;
10035             abi_ulong target_set;
10036             ret = do_sigprocmask(0, NULL, &cur_set);
10037             if (!ret) {
10038                 host_to_target_old_sigset(&target_set, &cur_set);
10039                 ret = target_set;
10040             }
10041         }
10042         return ret;
10043 #endif
10044 #ifdef TARGET_NR_ssetmask /* not on alpha */
10045     case TARGET_NR_ssetmask:
10046         {
10047             sigset_t set, oset;
10048             abi_ulong target_set = arg1;
10049             target_to_host_old_sigset(&set, &target_set);
10050             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10051             if (!ret) {
10052                 host_to_target_old_sigset(&target_set, &oset);
10053                 ret = target_set;
10054             }
10055         }
10056         return ret;
10057 #endif
10058 #ifdef TARGET_NR_sigprocmask
10059     case TARGET_NR_sigprocmask:
10060         {
10061 #if defined(TARGET_ALPHA)
10062             sigset_t set, oldset;
10063             abi_ulong mask;
10064             int how;
10065 
10066             switch (arg1) {
10067             case TARGET_SIG_BLOCK:
10068                 how = SIG_BLOCK;
10069                 break;
10070             case TARGET_SIG_UNBLOCK:
10071                 how = SIG_UNBLOCK;
10072                 break;
10073             case TARGET_SIG_SETMASK:
10074                 how = SIG_SETMASK;
10075                 break;
10076             default:
10077                 return -TARGET_EINVAL;
10078             }
10079             mask = arg2;
10080             target_to_host_old_sigset(&set, &mask);
10081 
10082             ret = do_sigprocmask(how, &set, &oldset);
10083             if (!is_error(ret)) {
10084                 host_to_target_old_sigset(&mask, &oldset);
10085                 ret = mask;
10086                 cpu_env->ir[IR_V0] = 0; /* force no error */
10087             }
10088 #else
10089             sigset_t set, oldset, *set_ptr;
10090             int how;
10091 
10092             if (arg2) {
10093                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10094                 if (!p) {
10095                     return -TARGET_EFAULT;
10096                 }
10097                 target_to_host_old_sigset(&set, p);
10098                 unlock_user(p, arg2, 0);
10099                 set_ptr = &set;
10100                 switch (arg1) {
10101                 case TARGET_SIG_BLOCK:
10102                     how = SIG_BLOCK;
10103                     break;
10104                 case TARGET_SIG_UNBLOCK:
10105                     how = SIG_UNBLOCK;
10106                     break;
10107                 case TARGET_SIG_SETMASK:
10108                     how = SIG_SETMASK;
10109                     break;
10110                 default:
10111                     return -TARGET_EINVAL;
10112                 }
10113             } else {
10114                 how = 0;
10115                 set_ptr = NULL;
10116             }
10117             ret = do_sigprocmask(how, set_ptr, &oldset);
10118             if (!is_error(ret) && arg3) {
10119                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10120                     return -TARGET_EFAULT;
10121                 host_to_target_old_sigset(p, &oldset);
10122                 unlock_user(p, arg3, sizeof(target_sigset_t));
10123             }
10124 #endif
10125         }
10126         return ret;
10127 #endif
10128     case TARGET_NR_rt_sigprocmask:
10129         {
10130             int how = arg1;
10131             sigset_t set, oldset, *set_ptr;
10132 
10133             if (arg4 != sizeof(target_sigset_t)) {
10134                 return -TARGET_EINVAL;
10135             }
10136 
10137             if (arg2) {
10138                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10139                 if (!p) {
10140                     return -TARGET_EFAULT;
10141                 }
10142                 target_to_host_sigset(&set, p);
10143                 unlock_user(p, arg2, 0);
10144                 set_ptr = &set;
10145                 switch(how) {
10146                 case TARGET_SIG_BLOCK:
10147                     how = SIG_BLOCK;
10148                     break;
10149                 case TARGET_SIG_UNBLOCK:
10150                     how = SIG_UNBLOCK;
10151                     break;
10152                 case TARGET_SIG_SETMASK:
10153                     how = SIG_SETMASK;
10154                     break;
10155                 default:
10156                     return -TARGET_EINVAL;
10157                 }
10158             } else {
10159                 how = 0;
10160                 set_ptr = NULL;
10161             }
10162             ret = do_sigprocmask(how, set_ptr, &oldset);
10163             if (!is_error(ret) && arg3) {
10164                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10165                     return -TARGET_EFAULT;
10166                 host_to_target_sigset(p, &oldset);
10167                 unlock_user(p, arg3, sizeof(target_sigset_t));
10168             }
10169         }
10170         return ret;
10171 #ifdef TARGET_NR_sigpending
10172     case TARGET_NR_sigpending:
10173         {
10174             sigset_t set;
10175             ret = get_errno(sigpending(&set));
10176             if (!is_error(ret)) {
10177                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10178                     return -TARGET_EFAULT;
10179                 host_to_target_old_sigset(p, &set);
10180                 unlock_user(p, arg1, sizeof(target_sigset_t));
10181             }
10182         }
10183         return ret;
10184 #endif
10185     case TARGET_NR_rt_sigpending:
10186         {
10187             sigset_t set;
10188 
10189             /* Yes, this check is >, not != like most. We follow the kernel's
10190              * logic here: the kernel does it this way because it implements
10191              * NR_sigpending through the same code path, and in that case
10192              * the old_sigset_t is smaller in size.
10193              */
10194             if (arg2 > sizeof(target_sigset_t)) {
10195                 return -TARGET_EINVAL;
10196             }
10197 
10198             ret = get_errno(sigpending(&set));
10199             if (!is_error(ret)) {
10200                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10201                     return -TARGET_EFAULT;
10202                 host_to_target_sigset(p, &set);
10203                 unlock_user(p, arg1, sizeof(target_sigset_t));
10204             }
10205         }
10206         return ret;
10207 #ifdef TARGET_NR_sigsuspend
10208     case TARGET_NR_sigsuspend:
10209         {
10210             sigset_t *set;
10211 
10212 #if defined(TARGET_ALPHA)
10213             TaskState *ts = get_task_state(cpu);
10214             /* target_to_host_old_sigset will bswap back */
10215             abi_ulong mask = tswapal(arg1);
10216             set = &ts->sigsuspend_mask;
10217             target_to_host_old_sigset(set, &mask);
10218 #else
10219             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10220             if (ret != 0) {
10221                 return ret;
10222             }
10223 #endif
10224             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10225             finish_sigsuspend_mask(ret);
10226         }
10227         return ret;
10228 #endif
10229     case TARGET_NR_rt_sigsuspend:
10230         {
10231             sigset_t *set;
10232 
10233             ret = process_sigsuspend_mask(&set, arg1, arg2);
10234             if (ret != 0) {
10235                 return ret;
10236             }
10237             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10238             finish_sigsuspend_mask(ret);
10239         }
10240         return ret;
10241 #ifdef TARGET_NR_rt_sigtimedwait
10242     case TARGET_NR_rt_sigtimedwait:
10243         {
10244             sigset_t set;
10245             struct timespec uts, *puts;
10246             siginfo_t uinfo;
10247 
10248             if (arg4 != sizeof(target_sigset_t)) {
10249                 return -TARGET_EINVAL;
10250             }
10251 
10252             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10253                 return -TARGET_EFAULT;
10254             target_to_host_sigset(&set, p);
10255             unlock_user(p, arg1, 0);
10256             if (arg3) {
10257                 puts = &uts;
10258                 if (target_to_host_timespec(puts, arg3)) {
10259                     return -TARGET_EFAULT;
10260                 }
10261             } else {
10262                 puts = NULL;
10263             }
10264             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10265                                                  SIGSET_T_SIZE));
10266             if (!is_error(ret)) {
10267                 if (arg2) {
10268                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10269                                   0);
10270                     if (!p) {
10271                         return -TARGET_EFAULT;
10272                     }
10273                     host_to_target_siginfo(p, &uinfo);
10274                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10275                 }
10276                 ret = host_to_target_signal(ret);
10277             }
10278         }
10279         return ret;
10280 #endif
10281 #ifdef TARGET_NR_rt_sigtimedwait_time64
10282     case TARGET_NR_rt_sigtimedwait_time64:
10283         {
10284             sigset_t set;
10285             struct timespec uts, *puts;
10286             siginfo_t uinfo;
10287 
10288             if (arg4 != sizeof(target_sigset_t)) {
10289                 return -TARGET_EINVAL;
10290             }
10291 
10292             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10293             if (!p) {
10294                 return -TARGET_EFAULT;
10295             }
10296             target_to_host_sigset(&set, p);
10297             unlock_user(p, arg1, 0);
10298             if (arg3) {
10299                 puts = &uts;
10300                 if (target_to_host_timespec64(puts, arg3)) {
10301                     return -TARGET_EFAULT;
10302                 }
10303             } else {
10304                 puts = NULL;
10305             }
10306             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10307                                                  SIGSET_T_SIZE));
10308             if (!is_error(ret)) {
10309                 if (arg2) {
10310                     p = lock_user(VERIFY_WRITE, arg2,
10311                                   sizeof(target_siginfo_t), 0);
10312                     if (!p) {
10313                         return -TARGET_EFAULT;
10314                     }
10315                     host_to_target_siginfo(p, &uinfo);
10316                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10317                 }
10318                 ret = host_to_target_signal(ret);
10319             }
10320         }
10321         return ret;
10322 #endif
10323     case TARGET_NR_rt_sigqueueinfo:
10324         {
10325             siginfo_t uinfo;
10326 
10327             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10328             if (!p) {
10329                 return -TARGET_EFAULT;
10330             }
10331             target_to_host_siginfo(&uinfo, p);
10332             unlock_user(p, arg3, 0);
10333             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10334         }
10335         return ret;
10336     case TARGET_NR_rt_tgsigqueueinfo:
10337         {
10338             siginfo_t uinfo;
10339 
10340             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10341             if (!p) {
10342                 return -TARGET_EFAULT;
10343             }
10344             target_to_host_siginfo(&uinfo, p);
10345             unlock_user(p, arg4, 0);
10346             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10347         }
10348         return ret;
10349 #ifdef TARGET_NR_sigreturn
10350     case TARGET_NR_sigreturn:
10351         if (block_signals()) {
10352             return -QEMU_ERESTARTSYS;
10353         }
10354         return do_sigreturn(cpu_env);
10355 #endif
10356     case TARGET_NR_rt_sigreturn:
10357         if (block_signals()) {
10358             return -QEMU_ERESTARTSYS;
10359         }
10360         return do_rt_sigreturn(cpu_env);
10361     case TARGET_NR_sethostname:
10362         if (!(p = lock_user_string(arg1)))
10363             return -TARGET_EFAULT;
10364         ret = get_errno(sethostname(p, arg2));
10365         unlock_user(p, arg1, 0);
10366         return ret;
10367 #ifdef TARGET_NR_setrlimit
10368     case TARGET_NR_setrlimit:
10369         {
10370             int resource = target_to_host_resource(arg1);
10371             struct target_rlimit *target_rlim;
10372             struct rlimit rlim;
10373             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10374                 return -TARGET_EFAULT;
10375             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10376             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10377             unlock_user_struct(target_rlim, arg2, 0);
10378             /*
10379              * If we just passed through resource limit settings for memory then
10380              * they would also apply to QEMU's own allocations, and QEMU would
10381              * crash or hang if its own allocations failed. Ideally we would
10382              * track the guest allocations in QEMU and apply the limits ourselves.
10383              * For now, just tell the guest the call succeeded but don't actually
10384              * limit anything.
10385              */
10386             if (resource != RLIMIT_AS &&
10387                 resource != RLIMIT_DATA &&
10388                 resource != RLIMIT_STACK) {
10389                 return get_errno(setrlimit(resource, &rlim));
10390             } else {
10391                 return 0;
10392             }
10393         }
10394 #endif
10395 #ifdef TARGET_NR_getrlimit
10396     case TARGET_NR_getrlimit:
10397         {
10398             int resource = target_to_host_resource(arg1);
10399             struct target_rlimit *target_rlim;
10400             struct rlimit rlim;
10401 
10402             ret = get_errno(getrlimit(resource, &rlim));
10403             if (!is_error(ret)) {
10404                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10405                     return -TARGET_EFAULT;
10406                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10407                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10408                 unlock_user_struct(target_rlim, arg2, 1);
10409             }
10410         }
10411         return ret;
10412 #endif
10413     case TARGET_NR_getrusage:
10414         {
10415             struct rusage rusage;
10416             ret = get_errno(getrusage(arg1, &rusage));
10417             if (!is_error(ret)) {
10418                 ret = host_to_target_rusage(arg2, &rusage);
10419             }
10420         }
10421         return ret;
10422 #if defined(TARGET_NR_gettimeofday)
10423     case TARGET_NR_gettimeofday:
10424         {
10425             struct timeval tv;
10426             struct timezone tz;
10427 
10428             ret = get_errno(gettimeofday(&tv, &tz));
10429             if (!is_error(ret)) {
10430                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10431                     return -TARGET_EFAULT;
10432                 }
10433                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10434                     return -TARGET_EFAULT;
10435                 }
10436             }
10437         }
10438         return ret;
10439 #endif
10440 #if defined(TARGET_NR_settimeofday)
10441     case TARGET_NR_settimeofday:
10442         {
10443             struct timeval tv, *ptv = NULL;
10444             struct timezone tz, *ptz = NULL;
10445 
10446             if (arg1) {
10447                 if (copy_from_user_timeval(&tv, arg1)) {
10448                     return -TARGET_EFAULT;
10449                 }
10450                 ptv = &tv;
10451             }
10452 
10453             if (arg2) {
10454                 if (copy_from_user_timezone(&tz, arg2)) {
10455                     return -TARGET_EFAULT;
10456                 }
10457                 ptz = &tz;
10458             }
10459 
10460             return get_errno(settimeofday(ptv, ptz));
10461         }
10462 #endif
10463 #if defined(TARGET_NR_select)
10464     case TARGET_NR_select:
10465 #if defined(TARGET_WANT_NI_OLD_SELECT)
10466         /* Some architectures used to have old_select here,
10467          * but now return ENOSYS for it.
10468          */
10469         ret = -TARGET_ENOSYS;
10470 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10471         ret = do_old_select(arg1);
10472 #else
10473         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10474 #endif
10475         return ret;
10476 #endif
10477 #ifdef TARGET_NR_pselect6
10478     case TARGET_NR_pselect6:
10479         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10480 #endif
10481 #ifdef TARGET_NR_pselect6_time64
10482     case TARGET_NR_pselect6_time64:
10483         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10484 #endif
10485 #ifdef TARGET_NR_symlink
10486     case TARGET_NR_symlink:
10487         {
10488             void *p2;
10489             p = lock_user_string(arg1);
10490             p2 = lock_user_string(arg2);
10491             if (!p || !p2)
10492                 ret = -TARGET_EFAULT;
10493             else
10494                 ret = get_errno(symlink(p, p2));
10495             unlock_user(p2, arg2, 0);
10496             unlock_user(p, arg1, 0);
10497         }
10498         return ret;
10499 #endif
10500 #if defined(TARGET_NR_symlinkat)
10501     case TARGET_NR_symlinkat:
10502         {
10503             void *p2;
10504             p  = lock_user_string(arg1);
10505             p2 = lock_user_string(arg3);
10506             if (!p || !p2)
10507                 ret = -TARGET_EFAULT;
10508             else
10509                 ret = get_errno(symlinkat(p, arg2, p2));
10510             unlock_user(p2, arg3, 0);
10511             unlock_user(p, arg1, 0);
10512         }
10513         return ret;
10514 #endif
10515 #ifdef TARGET_NR_readlink
10516     case TARGET_NR_readlink:
10517         {
10518             void *p2;
10519             p = lock_user_string(arg1);
10520             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10521             ret = get_errno(do_guest_readlink(p, p2, arg3));
10522             unlock_user(p2, arg2, ret);
10523             unlock_user(p, arg1, 0);
10524         }
10525         return ret;
10526 #endif
10527 #if defined(TARGET_NR_readlinkat)
10528     case TARGET_NR_readlinkat:
10529         {
10530             void *p2;
10531             p  = lock_user_string(arg2);
10532             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10533             if (!p || !p2) {
10534                 ret = -TARGET_EFAULT;
10535             } else if (!arg4) {
10536                 /* Short circuit this for the magic exe check. */
10537                 ret = -TARGET_EINVAL;
10538             } else if (is_proc_myself((const char *)p, "exe")) {
10539                 /*
10540                  * Don't worry about sign mismatch as earlier mapping
10541                  * logic would have thrown a bad address error.
10542                  */
10543                 ret = MIN(strlen(exec_path), arg4);
10544                 /* We cannot NUL terminate the string. */
10545                 memcpy(p2, exec_path, ret);
10546             } else {
10547                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10548             }
10549             unlock_user(p2, arg3, ret);
10550             unlock_user(p, arg2, 0);
10551         }
10552         return ret;
10553 #endif
10554 #ifdef TARGET_NR_swapon
10555     case TARGET_NR_swapon:
10556         if (!(p = lock_user_string(arg1)))
10557             return -TARGET_EFAULT;
10558         ret = get_errno(swapon(p, arg2));
10559         unlock_user(p, arg1, 0);
10560         return ret;
10561 #endif
10562     case TARGET_NR_reboot:
10563         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10564             /* arg4 is only used with LINUX_REBOOT_CMD_RESTART2 */
10565             p = lock_user_string(arg4);
10566             if (!p) {
10567                 return -TARGET_EFAULT;
10568             }
10569             ret = get_errno(reboot(arg1, arg2, arg3, p));
10570             unlock_user(p, arg4, 0);
10571         } else {
10572             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10573         }
10574         return ret;
10575 #ifdef TARGET_NR_mmap
10576     case TARGET_NR_mmap:
10577 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10578     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10579     defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) \
10580     || defined(TARGET_S390X)
10581         {
10582             abi_ulong *v;
10583             abi_ulong v1, v2, v3, v4, v5, v6;
10584             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10585                 return -TARGET_EFAULT;
10586             v1 = tswapal(v[0]);
10587             v2 = tswapal(v[1]);
10588             v3 = tswapal(v[2]);
10589             v4 = tswapal(v[3]);
10590             v5 = tswapal(v[4]);
10591             v6 = tswapal(v[5]);
10592             unlock_user(v, arg1, 0);
10593             return do_mmap(v1, v2, v3, v4, v5, v6);
10594         }
10595 #else
10596         /* mmap pointers are always untagged */
10597         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10598 #endif
10599 #endif
10600 #ifdef TARGET_NR_mmap2
10601     case TARGET_NR_mmap2:
10602 #ifndef MMAP_SHIFT
10603 #define MMAP_SHIFT 12
10604 #endif
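        /*
         * Worked example (added for clarity): mmap2 takes its file offset in
         * page units, so with MMAP_SHIFT == 12 a guest arg6 of 0x10 becomes
         * the byte offset 0x10 << 12 == 0x10000 passed to do_mmap().
         */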
10605         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10606                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10607 #endif
10608     case TARGET_NR_munmap:
10609         arg1 = cpu_untagged_addr(cpu, arg1);
10610         return get_errno(target_munmap(arg1, arg2));
10611     case TARGET_NR_mprotect:
10612         arg1 = cpu_untagged_addr(cpu, arg1);
10613         {
10614             TaskState *ts = get_task_state(cpu);
10615             /* Special hack to detect libc making the stack executable.  */
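            /*
             * Note (added for clarity): when this fires, the request is
             * widened downwards to ts->info->stack_limit, so the new
             * protection covers [stack_limit, original arg1 + arg2) rather
             * than only the page(s) the guest asked for.
             */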
10616             if ((arg3 & PROT_GROWSDOWN)
10617                 && arg1 >= ts->info->stack_limit
10618                 && arg1 <= ts->info->start_stack) {
10619                 arg3 &= ~PROT_GROWSDOWN;
10620                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10621                 arg1 = ts->info->stack_limit;
10622             }
10623         }
10624         return get_errno(target_mprotect(arg1, arg2, arg3));
10625 #ifdef TARGET_NR_mremap
10626     case TARGET_NR_mremap:
10627         arg1 = cpu_untagged_addr(cpu, arg1);
10628         /* mremap new_addr (arg5) is always untagged */
10629         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10630 #endif
10631         /* ??? msync/mlock/munlock are broken for softmmu.  */
10632 #ifdef TARGET_NR_msync
10633     case TARGET_NR_msync:
10634         return get_errno(msync(g2h(cpu, arg1), arg2,
10635                                target_to_host_msync_arg(arg3)));
10636 #endif
10637 #ifdef TARGET_NR_mlock
10638     case TARGET_NR_mlock:
10639         return get_errno(mlock(g2h(cpu, arg1), arg2));
10640 #endif
10641 #ifdef TARGET_NR_munlock
10642     case TARGET_NR_munlock:
10643         return get_errno(munlock(g2h(cpu, arg1), arg2));
10644 #endif
10645 #ifdef TARGET_NR_mlockall
10646     case TARGET_NR_mlockall:
10647         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10648 #endif
10649 #ifdef TARGET_NR_munlockall
10650     case TARGET_NR_munlockall:
10651         return get_errno(munlockall());
10652 #endif
10653 #ifdef TARGET_NR_truncate
10654     case TARGET_NR_truncate:
10655         if (!(p = lock_user_string(arg1)))
10656             return -TARGET_EFAULT;
10657         ret = get_errno(truncate(p, arg2));
10658         unlock_user(p, arg1, 0);
10659         return ret;
10660 #endif
10661 #ifdef TARGET_NR_ftruncate
10662     case TARGET_NR_ftruncate:
10663         return get_errno(ftruncate(arg1, arg2));
10664 #endif
10665     case TARGET_NR_fchmod:
10666         return get_errno(fchmod(arg1, arg2));
10667 #if defined(TARGET_NR_fchmodat)
10668     case TARGET_NR_fchmodat:
10669         if (!(p = lock_user_string(arg2)))
10670             return -TARGET_EFAULT;
10671         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10672         unlock_user(p, arg2, 0);
10673         return ret;
10674 #endif
10675     case TARGET_NR_getpriority:
10676         /* Note that negative values are valid for getpriority, so we must
10677            differentiate based on errno settings.  */
10678         errno = 0;
10679         ret = getpriority(arg1, arg2);
10680         if (ret == -1 && errno != 0) {
10681             return -host_to_target_errno(errno);
10682         }
10683 #ifdef TARGET_ALPHA
10684         /* Return value is the unbiased priority.  Signal no error.  */
10685         cpu_env->ir[IR_V0] = 0;
10686 #else
10687         /* Return value is a biased priority to avoid negative numbers.  */
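        /* Worked example (added for clarity): a host nice value of -5 is
         * returned to the guest as 20 - (-5) == 25. */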
10688         ret = 20 - ret;
10689 #endif
10690         return ret;
10691     case TARGET_NR_setpriority:
10692         return get_errno(setpriority(arg1, arg2, arg3));
10693 #ifdef TARGET_NR_statfs
10694     case TARGET_NR_statfs:
10695         if (!(p = lock_user_string(arg1))) {
10696             return -TARGET_EFAULT;
10697         }
10698         ret = get_errno(statfs(path(p), &stfs));
10699         unlock_user(p, arg1, 0);
10700     convert_statfs:
10701         if (!is_error(ret)) {
10702             struct target_statfs *target_stfs;
10703 
10704             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10705                 return -TARGET_EFAULT;
10706             __put_user(stfs.f_type, &target_stfs->f_type);
10707             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10708             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10709             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10710             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10711             __put_user(stfs.f_files, &target_stfs->f_files);
10712             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10713             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10714             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10715             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10716             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10717 #ifdef _STATFS_F_FLAGS
10718             __put_user(stfs.f_flags, &target_stfs->f_flags);
10719 #else
10720             __put_user(0, &target_stfs->f_flags);
10721 #endif
10722             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10723             unlock_user_struct(target_stfs, arg2, 1);
10724         }
10725         return ret;
10726 #endif
10727 #ifdef TARGET_NR_fstatfs
10728     case TARGET_NR_fstatfs:
10729         ret = get_errno(fstatfs(arg1, &stfs));
10730         goto convert_statfs;
10731 #endif
10732 #ifdef TARGET_NR_statfs64
10733     case TARGET_NR_statfs64:
10734         if (!(p = lock_user_string(arg1))) {
10735             return -TARGET_EFAULT;
10736         }
10737         ret = get_errno(statfs(path(p), &stfs));
10738         unlock_user(p, arg1, 0);
10739     convert_statfs64:
10740         if (!is_error(ret)) {
10741             struct target_statfs64 *target_stfs;
10742 
10743             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10744                 return -TARGET_EFAULT;
10745             __put_user(stfs.f_type, &target_stfs->f_type);
10746             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10747             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10748             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10749             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10750             __put_user(stfs.f_files, &target_stfs->f_files);
10751             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10752             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10753             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10754             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10755             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10756 #ifdef _STATFS_F_FLAGS
10757             __put_user(stfs.f_flags, &target_stfs->f_flags);
10758 #else
10759             __put_user(0, &target_stfs->f_flags);
10760 #endif
10761             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10762             unlock_user_struct(target_stfs, arg3, 1);
10763         }
10764         return ret;
10765     case TARGET_NR_fstatfs64:
10766         ret = get_errno(fstatfs(arg1, &stfs));
10767         goto convert_statfs64;
10768 #endif
10769 #ifdef TARGET_NR_socketcall
10770     case TARGET_NR_socketcall:
10771         return do_socketcall(arg1, arg2);
10772 #endif
10773 #ifdef TARGET_NR_accept
10774     case TARGET_NR_accept:
10775         return do_accept4(arg1, arg2, arg3, 0);
10776 #endif
10777 #ifdef TARGET_NR_accept4
10778     case TARGET_NR_accept4:
10779         return do_accept4(arg1, arg2, arg3, arg4);
10780 #endif
10781 #ifdef TARGET_NR_bind
10782     case TARGET_NR_bind:
10783         return do_bind(arg1, arg2, arg3);
10784 #endif
10785 #ifdef TARGET_NR_connect
10786     case TARGET_NR_connect:
10787         return do_connect(arg1, arg2, arg3);
10788 #endif
10789 #ifdef TARGET_NR_getpeername
10790     case TARGET_NR_getpeername:
10791         return do_getpeername(arg1, arg2, arg3);
10792 #endif
10793 #ifdef TARGET_NR_getsockname
10794     case TARGET_NR_getsockname:
10795         return do_getsockname(arg1, arg2, arg3);
10796 #endif
10797 #ifdef TARGET_NR_getsockopt
10798     case TARGET_NR_getsockopt:
10799         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10800 #endif
10801 #ifdef TARGET_NR_listen
10802     case TARGET_NR_listen:
10803         return get_errno(listen(arg1, arg2));
10804 #endif
10805 #ifdef TARGET_NR_recv
10806     case TARGET_NR_recv:
10807         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10808 #endif
10809 #ifdef TARGET_NR_recvfrom
10810     case TARGET_NR_recvfrom:
10811         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10812 #endif
10813 #ifdef TARGET_NR_recvmsg
10814     case TARGET_NR_recvmsg:
10815         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10816 #endif
10817 #ifdef TARGET_NR_send
10818     case TARGET_NR_send:
10819         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10820 #endif
10821 #ifdef TARGET_NR_sendmsg
10822     case TARGET_NR_sendmsg:
10823         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10824 #endif
10825 #ifdef TARGET_NR_sendmmsg
10826     case TARGET_NR_sendmmsg:
10827         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10828 #endif
10829 #ifdef TARGET_NR_recvmmsg
10830     case TARGET_NR_recvmmsg:
10831         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10832 #endif
10833 #ifdef TARGET_NR_sendto
10834     case TARGET_NR_sendto:
10835         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10836 #endif
10837 #ifdef TARGET_NR_shutdown
10838     case TARGET_NR_shutdown:
10839         return get_errno(shutdown(arg1, arg2));
10840 #endif
10841 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10842     case TARGET_NR_getrandom:
10843         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10844         if (!p) {
10845             return -TARGET_EFAULT;
10846         }
10847         ret = get_errno(getrandom(p, arg2, arg3));
10848         unlock_user(p, arg1, ret);
10849         return ret;
10850 #endif
10851 #ifdef TARGET_NR_socket
10852     case TARGET_NR_socket:
10853         return do_socket(arg1, arg2, arg3);
10854 #endif
10855 #ifdef TARGET_NR_socketpair
10856     case TARGET_NR_socketpair:
10857         return do_socketpair(arg1, arg2, arg3, arg4);
10858 #endif
10859 #ifdef TARGET_NR_setsockopt
10860     case TARGET_NR_setsockopt:
10861         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10862 #endif
10863 #if defined(TARGET_NR_syslog)
10864     case TARGET_NR_syslog:
10865         {
10866             int len = arg3;    /* length of the guest buffer (3rd syscall argument) */
10867 
10868             switch (arg1) {
10869             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10870             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10871             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10872             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10873             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10874             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10875             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10876             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10877                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10878             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10879             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10880             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10881                 {
10882                     if (len < 0) {
10883                         return -TARGET_EINVAL;
10884                     }
10885                     if (len == 0) {
10886                         return 0;
10887                     }
10888                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10889                     if (!p) {
10890                         return -TARGET_EFAULT;
10891                     }
10892                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10893                     unlock_user(p, arg2, arg3);
10894                 }
10895                 return ret;
10896             default:
10897                 return -TARGET_EINVAL;
10898             }
10899         }
10900         break;
10901 #endif
10902     case TARGET_NR_setitimer:
10903         {
10904             struct itimerval value, ovalue, *pvalue;
10905 
10906             if (arg2) {
10907                 pvalue = &value;
10908                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10909                     || copy_from_user_timeval(&pvalue->it_value,
10910                                               arg2 + sizeof(struct target_timeval)))
10911                     return -TARGET_EFAULT;
10912             } else {
10913                 pvalue = NULL;
10914             }
10915             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10916             if (!is_error(ret) && arg3) {
10917                 if (copy_to_user_timeval(arg3,
10918                                          &ovalue.it_interval)
10919                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10920                                             &ovalue.it_value))
10921                     return -TARGET_EFAULT;
10922             }
10923         }
10924         return ret;
10925     case TARGET_NR_getitimer:
10926         {
10927             struct itimerval value;
10928 
10929             ret = get_errno(getitimer(arg1, &value));
10930             if (!is_error(ret) && arg2) {
10931                 if (copy_to_user_timeval(arg2,
10932                                          &value.it_interval)
10933                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10934                                             &value.it_value))
10935                     return -TARGET_EFAULT;
10936             }
10937         }
10938         return ret;
10939 #ifdef TARGET_NR_stat
10940     case TARGET_NR_stat:
10941         if (!(p = lock_user_string(arg1))) {
10942             return -TARGET_EFAULT;
10943         }
10944         ret = get_errno(stat(path(p), &st));
10945         unlock_user(p, arg1, 0);
10946         goto do_stat;
10947 #endif
10948 #ifdef TARGET_NR_lstat
10949     case TARGET_NR_lstat:
10950         if (!(p = lock_user_string(arg1))) {
10951             return -TARGET_EFAULT;
10952         }
10953         ret = get_errno(lstat(path(p), &st));
10954         unlock_user(p, arg1, 0);
10955         goto do_stat;
10956 #endif
10957 #ifdef TARGET_NR_fstat
10958     case TARGET_NR_fstat:
10959         {
10960             ret = get_errno(fstat(arg1, &st));
10961 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10962         do_stat:
10963 #endif
10964             if (!is_error(ret)) {
10965                 struct target_stat *target_st;
10966 
10967                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10968                     return -TARGET_EFAULT;
10969                 memset(target_st, 0, sizeof(*target_st));
10970                 __put_user(st.st_dev, &target_st->st_dev);
10971                 __put_user(st.st_ino, &target_st->st_ino);
10972                 __put_user(st.st_mode, &target_st->st_mode);
10973                 __put_user(st.st_uid, &target_st->st_uid);
10974                 __put_user(st.st_gid, &target_st->st_gid);
10975                 __put_user(st.st_nlink, &target_st->st_nlink);
10976                 __put_user(st.st_rdev, &target_st->st_rdev);
10977                 __put_user(st.st_size, &target_st->st_size);
10978                 __put_user(st.st_blksize, &target_st->st_blksize);
10979                 __put_user(st.st_blocks, &target_st->st_blocks);
10980                 __put_user(st.st_atime, &target_st->target_st_atime);
10981                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10982                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10983 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10984                 __put_user(st.st_atim.tv_nsec,
10985                            &target_st->target_st_atime_nsec);
10986                 __put_user(st.st_mtim.tv_nsec,
10987                            &target_st->target_st_mtime_nsec);
10988                 __put_user(st.st_ctim.tv_nsec,
10989                            &target_st->target_st_ctime_nsec);
10990 #endif
10991                 unlock_user_struct(target_st, arg2, 1);
10992             }
10993         }
10994         return ret;
10995 #endif
10996     case TARGET_NR_vhangup:
10997         return get_errno(vhangup());
10998 #ifdef TARGET_NR_syscall
10999     case TARGET_NR_syscall:
11000         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11001                           arg6, arg7, arg8, 0);
11002 #endif
11003 #if defined(TARGET_NR_wait4)
11004     case TARGET_NR_wait4:
11005         {
11006             int status;
11007             abi_long status_ptr = arg2;
11008             struct rusage rusage, *rusage_ptr;
11009             abi_ulong target_rusage = arg4;
11010             abi_long rusage_err;
11011             if (target_rusage)
11012                 rusage_ptr = &rusage;
11013             else
11014                 rusage_ptr = NULL;
11015             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11016             if (!is_error(ret)) {
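                /* Only copy the status back when a child was actually
                 * reaped (ret != 0); a 0 return (e.g. WNOHANG with no
                 * state change) leaves the guest's status untouched.
                 */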
11017                 if (status_ptr && ret) {
11018                     status = host_to_target_waitstatus(status);
11019                     if (put_user_s32(status, status_ptr))
11020                         return -TARGET_EFAULT;
11021                 }
11022                 if (target_rusage) {
11023                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11024                     if (rusage_err) {
11025                         ret = rusage_err;
11026                     }
11027                 }
11028             }
11029         }
11030         return ret;
11031 #endif
11032 #ifdef TARGET_NR_swapoff
11033     case TARGET_NR_swapoff:
11034         if (!(p = lock_user_string(arg1)))
11035             return -TARGET_EFAULT;
11036         ret = get_errno(swapoff(p));
11037         unlock_user(p, arg1, 0);
11038         return ret;
11039 #endif
11040     case TARGET_NR_sysinfo:
11041         {
11042             struct target_sysinfo *target_value;
11043             struct sysinfo value;
11044             ret = get_errno(sysinfo(&value));
11045             if (!is_error(ret) && arg1)
11046             {
11047                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11048                     return -TARGET_EFAULT;
11049                 __put_user(value.uptime, &target_value->uptime);
11050                 __put_user(value.loads[0], &target_value->loads[0]);
11051                 __put_user(value.loads[1], &target_value->loads[1]);
11052                 __put_user(value.loads[2], &target_value->loads[2]);
11053                 __put_user(value.totalram, &target_value->totalram);
11054                 __put_user(value.freeram, &target_value->freeram);
11055                 __put_user(value.sharedram, &target_value->sharedram);
11056                 __put_user(value.bufferram, &target_value->bufferram);
11057                 __put_user(value.totalswap, &target_value->totalswap);
11058                 __put_user(value.freeswap, &target_value->freeswap);
11059                 __put_user(value.procs, &target_value->procs);
11060                 __put_user(value.totalhigh, &target_value->totalhigh);
11061                 __put_user(value.freehigh, &target_value->freehigh);
11062                 __put_user(value.mem_unit, &target_value->mem_unit);
11063                 unlock_user_struct(target_value, arg1, 1);
11064             }
11065         }
11066         return ret;
11067 #ifdef TARGET_NR_ipc
11068     case TARGET_NR_ipc:
11069         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11070 #endif
11071 #ifdef TARGET_NR_semget
11072     case TARGET_NR_semget:
11073         return get_errno(semget(arg1, arg2, arg3));
11074 #endif
11075 #ifdef TARGET_NR_semop
11076     case TARGET_NR_semop:
11077         return do_semtimedop(arg1, arg2, arg3, 0, false);
11078 #endif
11079 #ifdef TARGET_NR_semtimedop
11080     case TARGET_NR_semtimedop:
11081         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11082 #endif
11083 #ifdef TARGET_NR_semtimedop_time64
11084     case TARGET_NR_semtimedop_time64:
11085         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11086 #endif
11087 #ifdef TARGET_NR_semctl
11088     case TARGET_NR_semctl:
11089         return do_semctl(arg1, arg2, arg3, arg4);
11090 #endif
11091 #ifdef TARGET_NR_msgctl
11092     case TARGET_NR_msgctl:
11093         return do_msgctl(arg1, arg2, arg3);
11094 #endif
11095 #ifdef TARGET_NR_msgget
11096     case TARGET_NR_msgget:
11097         return get_errno(msgget(arg1, arg2));
11098 #endif
11099 #ifdef TARGET_NR_msgrcv
11100     case TARGET_NR_msgrcv:
11101         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11102 #endif
11103 #ifdef TARGET_NR_msgsnd
11104     case TARGET_NR_msgsnd:
11105         return do_msgsnd(arg1, arg2, arg3, arg4);
11106 #endif
11107 #ifdef TARGET_NR_shmget
11108     case TARGET_NR_shmget:
11109         return get_errno(shmget(arg1, arg2, arg3));
11110 #endif
11111 #ifdef TARGET_NR_shmctl
11112     case TARGET_NR_shmctl:
11113         return do_shmctl(arg1, arg2, arg3);
11114 #endif
11115 #ifdef TARGET_NR_shmat
11116     case TARGET_NR_shmat:
11117         return target_shmat(cpu_env, arg1, arg2, arg3);
11118 #endif
11119 #ifdef TARGET_NR_shmdt
11120     case TARGET_NR_shmdt:
11121         return target_shmdt(arg1);
11122 #endif
11123     case TARGET_NR_fsync:
11124         return get_errno(fsync(arg1));
11125     case TARGET_NR_clone:
11126         /* Linux manages to have three different orderings for its
11127          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11128          * match the kernel's CONFIG_CLONE_* settings.
11129          * Microblaze is further special in that it uses a sixth
11130          * implicit argument to clone for the TLS pointer.
11131          */
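        /*
         * Each variant below reshuffles arg1..arg6 into the order that
         * do_fork() expects, assumed here to be (flags, newsp,
         * parent_tidptr, tls, child_tidptr) after cpu_env.
         */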
11132 #if defined(TARGET_MICROBLAZE)
11133         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11134 #elif defined(TARGET_CLONE_BACKWARDS)
11135         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11136 #elif defined(TARGET_CLONE_BACKWARDS2)
11137         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11138 #else
11139         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11140 #endif
11141         return ret;
11142 #ifdef __NR_exit_group
11143         /* new thread calls */
11144     case TARGET_NR_exit_group:
11145         preexit_cleanup(cpu_env, arg1);
11146         return get_errno(exit_group(arg1));
11147 #endif
11148     case TARGET_NR_setdomainname:
11149         if (!(p = lock_user_string(arg1)))
11150             return -TARGET_EFAULT;
11151         ret = get_errno(setdomainname(p, arg2));
11152         unlock_user(p, arg1, 0);
11153         return ret;
11154     case TARGET_NR_uname:
11155         /* No need to transcode because we use the Linux syscall.  */
11156         {
11157             struct new_utsname * buf;
11158 
11159             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11160                 return -TARGET_EFAULT;
11161             ret = get_errno(sys_uname(buf));
11162             if (!is_error(ret)) {
11163                 /* Overwrite the native machine name with whatever is being
11164                    emulated. */
11165                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11166                           sizeof(buf->machine));
11167                 /* Allow the user to override the reported release.  */
11168                 if (qemu_uname_release && *qemu_uname_release) {
11169                     g_strlcpy(buf->release, qemu_uname_release,
11170                               sizeof(buf->release));
11171                 }
11172             }
11173             unlock_user_struct(buf, arg1, 1);
11174         }
11175         return ret;
11176 #ifdef TARGET_I386
11177     case TARGET_NR_modify_ldt:
11178         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11179 #if !defined(TARGET_X86_64)
11180     case TARGET_NR_vm86:
11181         return do_vm86(cpu_env, arg1, arg2);
11182 #endif
11183 #endif
11184 #if defined(TARGET_NR_adjtimex)
11185     case TARGET_NR_adjtimex:
11186         {
11187             struct timex host_buf;
11188 
11189             if (target_to_host_timex(&host_buf, arg1) != 0) {
11190                 return -TARGET_EFAULT;
11191             }
11192             ret = get_errno(adjtimex(&host_buf));
11193             if (!is_error(ret)) {
11194                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11195                     return -TARGET_EFAULT;
11196                 }
11197             }
11198         }
11199         return ret;
11200 #endif
11201 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11202     case TARGET_NR_clock_adjtime:
11203         {
11204             struct timex htx;
11205 
11206             if (target_to_host_timex(&htx, arg2) != 0) {
11207                 return -TARGET_EFAULT;
11208             }
11209             ret = get_errno(clock_adjtime(arg1, &htx));
11210             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11211                 return -TARGET_EFAULT;
11212             }
11213         }
11214         return ret;
11215 #endif
11216 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11217     case TARGET_NR_clock_adjtime64:
11218         {
11219             struct timex htx;
11220 
11221             if (target_to_host_timex64(&htx, arg2) != 0) {
11222                 return -TARGET_EFAULT;
11223             }
11224             ret = get_errno(clock_adjtime(arg1, &htx));
11225             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11226                     return -TARGET_EFAULT;
11227             }
11228         }
11229         return ret;
11230 #endif
11231     case TARGET_NR_getpgid:
11232         return get_errno(getpgid(arg1));
11233     case TARGET_NR_fchdir:
11234         return get_errno(fchdir(arg1));
11235     case TARGET_NR_personality:
11236         return get_errno(personality(arg1));
11237 #ifdef TARGET_NR__llseek /* Not on alpha */
11238     case TARGET_NR__llseek:
11239         {
11240             int64_t res;
11241 #if !defined(__NR_llseek)
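            /* No host _llseek syscall (typically 64-bit hosts): build the
             * 64-bit offset from the two 32-bit halves and use plain lseek().
             */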
11242             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11243             if (res == -1) {
11244                 ret = get_errno(res);
11245             } else {
11246                 ret = 0;
11247             }
11248 #else
11249             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11250 #endif
11251             if ((ret == 0) && put_user_s64(res, arg4)) {
11252                 return -TARGET_EFAULT;
11253             }
11254         }
11255         return ret;
11256 #endif
11257 #ifdef TARGET_NR_getdents
11258     case TARGET_NR_getdents:
11259         return do_getdents(arg1, arg2, arg3);
11260 #endif /* TARGET_NR_getdents */
11261 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11262     case TARGET_NR_getdents64:
11263         return do_getdents64(arg1, arg2, arg3);
11264 #endif /* TARGET_NR_getdents64 */
11265 #if defined(TARGET_NR__newselect)
11266     case TARGET_NR__newselect:
11267         return do_select(arg1, arg2, arg3, arg4, arg5);
11268 #endif
11269 #ifdef TARGET_NR_poll
11270     case TARGET_NR_poll:
11271         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11272 #endif
11273 #ifdef TARGET_NR_ppoll
11274     case TARGET_NR_ppoll:
11275         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11276 #endif
11277 #ifdef TARGET_NR_ppoll_time64
11278     case TARGET_NR_ppoll_time64:
11279         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11280 #endif
11281     case TARGET_NR_flock:
11282         /* NOTE: the flock() operation constants are identical on every
11283            Linux platform, so arg2 can be passed through unchanged. */
11284         return get_errno(safe_flock(arg1, arg2));
11285     case TARGET_NR_readv:
11286         {
11287             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11288             if (vec != NULL) {
11289                 ret = get_errno(safe_readv(arg1, vec, arg3));
11290                 unlock_iovec(vec, arg2, arg3, 1);
11291             } else {
11292                 ret = -host_to_target_errno(errno);
11293             }
11294         }
11295         return ret;
11296     case TARGET_NR_writev:
11297         {
11298             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11299             if (vec != NULL) {
11300                 ret = get_errno(safe_writev(arg1, vec, arg3));
11301                 unlock_iovec(vec, arg2, arg3, 0);
11302             } else {
11303                 ret = -host_to_target_errno(errno);
11304             }
11305         }
11306         return ret;
11307 #if defined(TARGET_NR_preadv)
11308     case TARGET_NR_preadv:
11309         {
11310             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11311             if (vec != NULL) {
11312                 unsigned long low, high;
11313 
11314                 target_to_host_low_high(arg4, arg5, &low, &high);
11315                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11316                 unlock_iovec(vec, arg2, arg3, 1);
11317             } else {
11318                 ret = -host_to_target_errno(errno);
11319            }
11320         }
11321         return ret;
11322 #endif
11323 #if defined(TARGET_NR_pwritev)
11324     case TARGET_NR_pwritev:
11325         {
11326             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11327             if (vec != NULL) {
11328                 unsigned long low, high;
11329 
11330                 target_to_host_low_high(arg4, arg5, &low, &high);
11331                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11332                 unlock_iovec(vec, arg2, arg3, 0);
11333             } else {
11334                 ret = -host_to_target_errno(errno);
11335            }
11336         }
11337         return ret;
11338 #endif
11339     case TARGET_NR_getsid:
11340         return get_errno(getsid(arg1));
11341 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11342     case TARGET_NR_fdatasync:
11343         return get_errno(fdatasync(arg1));
11344 #endif
11345     case TARGET_NR_sched_getaffinity:
11346         {
11347             unsigned int mask_size;
11348             unsigned long *mask;
11349 
11350             /*
11351              * sched_getaffinity needs multiples of ulong, so we must take
11352              * care of mismatches between target and host ulong sizes.
11353              */
11354             if (arg2 & (sizeof(abi_ulong) - 1)) {
11355                 return -TARGET_EINVAL;
11356             }
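            /* Round the guest-supplied size up to a whole number of host
             * longs, e.g. a 4-byte mask from a 32-bit guest becomes 8
             * bytes on a 64-bit host.
             */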
11357             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11358 
11359             mask = alloca(mask_size);
11360             memset(mask, 0, mask_size);
11361             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11362 
11363             if (!is_error(ret)) {
11364                 if (ret > arg2) {
11365                     /* More data was returned than will fit in the caller's buffer.
11366                      * This only happens if sizeof(abi_long) < sizeof(long)
11367                      * and the caller passed us a buffer holding an odd number
11368                      * of abi_longs. If the host kernel is actually using the
11369                      * extra 4 bytes then fail EINVAL; otherwise we can just
11370                      * ignore them and only copy the interesting part.
11371                      */
11372                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11373                     if (numcpus > arg2 * 8) {
11374                         return -TARGET_EINVAL;
11375                     }
11376                     ret = arg2;
11377                 }
11378 
11379                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11380                     return -TARGET_EFAULT;
11381                 }
11382             }
11383         }
11384         return ret;
11385     case TARGET_NR_sched_setaffinity:
11386         {
11387             unsigned int mask_size;
11388             unsigned long *mask;
11389 
11390             /*
11391              * sched_setaffinity needs multiples of ulong, so we must take
11392              * care of mismatches between target and host ulong sizes.
11393              */
11394             if (arg2 & (sizeof(abi_ulong) - 1)) {
11395                 return -TARGET_EINVAL;
11396             }
11397             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11398             mask = alloca(mask_size);
11399 
11400             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11401             if (ret) {
11402                 return ret;
11403             }
11404 
11405             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11406         }
11407     case TARGET_NR_getcpu:
11408         {
11409             unsigned cpuid, node;
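            /* The third getcpu() argument (tcache) has been ignored by the
             * kernel since Linux 2.6.24, so always pass NULL.
             */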
11410             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11411                                        arg2 ? &node : NULL,
11412                                        NULL));
11413             if (is_error(ret)) {
11414                 return ret;
11415             }
11416             if (arg1 && put_user_u32(cpuid, arg1)) {
11417                 return -TARGET_EFAULT;
11418             }
11419             if (arg2 && put_user_u32(node, arg2)) {
11420                 return -TARGET_EFAULT;
11421             }
11422         }
11423         return ret;
11424     case TARGET_NR_sched_setparam:
11425         {
11426             struct target_sched_param *target_schp;
11427             struct sched_param schp;
11428 
11429             if (arg2 == 0) {
11430                 return -TARGET_EINVAL;
11431             }
11432             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11433                 return -TARGET_EFAULT;
11434             }
11435             schp.sched_priority = tswap32(target_schp->sched_priority);
11436             unlock_user_struct(target_schp, arg2, 0);
11437             return get_errno(sys_sched_setparam(arg1, &schp));
11438         }
11439     case TARGET_NR_sched_getparam:
11440         {
11441             struct target_sched_param *target_schp;
11442             struct sched_param schp;
11443 
11444             if (arg2 == 0) {
11445                 return -TARGET_EINVAL;
11446             }
11447             ret = get_errno(sys_sched_getparam(arg1, &schp));
11448             if (!is_error(ret)) {
11449                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11450                     return -TARGET_EFAULT;
11451                 }
11452                 target_schp->sched_priority = tswap32(schp.sched_priority);
11453                 unlock_user_struct(target_schp, arg2, 1);
11454             }
11455         }
11456         return ret;
11457     case TARGET_NR_sched_setscheduler:
11458         {
11459             struct target_sched_param *target_schp;
11460             struct sched_param schp;
11461             if (arg3 == 0) {
11462                 return -TARGET_EINVAL;
11463             }
11464             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11465                 return -TARGET_EFAULT;
11466             }
11467             schp.sched_priority = tswap32(target_schp->sched_priority);
11468             unlock_user_struct(target_schp, arg3, 0);
11469             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11470         }
11471     case TARGET_NR_sched_getscheduler:
11472         return get_errno(sys_sched_getscheduler(arg1));
11473     case TARGET_NR_sched_getattr:
11474         {
11475             struct target_sched_attr *target_scha;
11476             struct sched_attr scha;
11477             if (arg2 == 0) {
11478                 return -TARGET_EINVAL;
11479             }
11480             if (arg3 > sizeof(scha)) {
11481                 arg3 = sizeof(scha);
11482             }
11483             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11484             if (!is_error(ret)) {
11485                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11486                 if (!target_scha) {
11487                     return -TARGET_EFAULT;
11488                 }
11489                 target_scha->size = tswap32(scha.size);
11490                 target_scha->sched_policy = tswap32(scha.sched_policy);
11491                 target_scha->sched_flags = tswap64(scha.sched_flags);
11492                 target_scha->sched_nice = tswap32(scha.sched_nice);
11493                 target_scha->sched_priority = tswap32(scha.sched_priority);
11494                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11495                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11496                 target_scha->sched_period = tswap64(scha.sched_period);
11497                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11498                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11499                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11500                 }
11501                 unlock_user(target_scha, arg2, arg3);
11502             }
11503             return ret;
11504         }
11505     case TARGET_NR_sched_setattr:
11506         {
11507             struct target_sched_attr *target_scha;
11508             struct sched_attr scha;
11509             uint32_t size;
11510             int zeroed;
11511             if (arg2 == 0) {
11512                 return -TARGET_EINVAL;
11513             }
11514             if (get_user_u32(size, arg2)) {
11515                 return -TARGET_EFAULT;
11516             }
11517             if (!size) {
11518                 size = offsetof(struct target_sched_attr, sched_util_min);
11519             }
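            /* A size of zero selects the original sched_attr layout, which
             * ends just before sched_util_min (SCHED_ATTR_SIZE_VER0 in the
             * kernel headers).
             */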
11520             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11521                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11522                     return -TARGET_EFAULT;
11523                 }
11524                 return -TARGET_E2BIG;
11525             }
11526 
11527             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11528             if (zeroed < 0) {
11529                 return zeroed;
11530             } else if (zeroed == 0) {
11531                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11532                     return -TARGET_EFAULT;
11533                 }
11534                 return -TARGET_E2BIG;
11535             }
11536             if (size > sizeof(struct target_sched_attr)) {
11537                 size = sizeof(struct target_sched_attr);
11538             }
11539 
11540             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11541             if (!target_scha) {
11542                 return -TARGET_EFAULT;
11543             }
11544             scha.size = size;
11545             scha.sched_policy = tswap32(target_scha->sched_policy);
11546             scha.sched_flags = tswap64(target_scha->sched_flags);
11547             scha.sched_nice = tswap32(target_scha->sched_nice);
11548             scha.sched_priority = tswap32(target_scha->sched_priority);
11549             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11550             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11551             scha.sched_period = tswap64(target_scha->sched_period);
11552             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11553                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11554                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11555             }
11556             unlock_user(target_scha, arg2, 0);
11557             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11558         }
11559     case TARGET_NR_sched_yield:
11560         return get_errno(sched_yield());
11561     case TARGET_NR_sched_get_priority_max:
11562         return get_errno(sched_get_priority_max(arg1));
11563     case TARGET_NR_sched_get_priority_min:
11564         return get_errno(sched_get_priority_min(arg1));
11565 #ifdef TARGET_NR_sched_rr_get_interval
11566     case TARGET_NR_sched_rr_get_interval:
11567         {
11568             struct timespec ts;
11569             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11570             if (!is_error(ret)) {
11571                 ret = host_to_target_timespec(arg2, &ts);
11572             }
11573         }
11574         return ret;
11575 #endif
11576 #ifdef TARGET_NR_sched_rr_get_interval_time64
11577     case TARGET_NR_sched_rr_get_interval_time64:
11578         {
11579             struct timespec ts;
11580             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11581             if (!is_error(ret)) {
11582                 ret = host_to_target_timespec64(arg2, &ts);
11583             }
11584         }
11585         return ret;
11586 #endif
11587 #if defined(TARGET_NR_nanosleep)
11588     case TARGET_NR_nanosleep:
11589         {
11590             struct timespec req, rem;
11591             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11592             ret = get_errno(safe_nanosleep(&req, &rem));
11593             if (is_error(ret) && arg2) {
11594                 host_to_target_timespec(arg2, &rem);
11595             }
11596         }
11597         return ret;
11598 #endif
11599     case TARGET_NR_prctl:
11600         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11602 #ifdef TARGET_NR_arch_prctl
11603     case TARGET_NR_arch_prctl:
11604         return do_arch_prctl(cpu_env, arg1, arg2);
11605 #endif
11606 #ifdef TARGET_NR_pread64
11607     case TARGET_NR_pread64:
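        /* ABIs that pass 64-bit values in aligned register pairs insert a
         * pad slot before the offset, so its two halves arrive one
         * argument later.
         */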
11608         if (regpairs_aligned(cpu_env, num)) {
11609             arg4 = arg5;
11610             arg5 = arg6;
11611         }
11612         if (arg2 == 0 && arg3 == 0) {
11613             /* Special-case NULL buffer and zero length, which should succeed */
11614             p = 0;
11615         } else {
11616             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11617             if (!p) {
11618                 return -TARGET_EFAULT;
11619             }
11620         }
11621         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11622         unlock_user(p, arg2, ret);
11623         return ret;
11624     case TARGET_NR_pwrite64:
11625         if (regpairs_aligned(cpu_env, num)) {
11626             arg4 = arg5;
11627             arg5 = arg6;
11628         }
11629         if (arg2 == 0 && arg3 == 0) {
11630             /* Special-case NULL buffer and zero length, which should succeed */
11631             p = 0;
11632         } else {
11633             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11634             if (!p) {
11635                 return -TARGET_EFAULT;
11636             }
11637         }
11638         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11639         unlock_user(p, arg2, 0);
11640         return ret;
11641 #endif
11642     case TARGET_NR_getcwd:
11643         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11644             return -TARGET_EFAULT;
11645         ret = get_errno(sys_getcwd1(p, arg2));
11646         unlock_user(p, arg1, ret);
11647         return ret;
11648     case TARGET_NR_capget:
11649     case TARGET_NR_capset:
11650     {
11651         struct target_user_cap_header *target_header;
11652         struct target_user_cap_data *target_data = NULL;
11653         struct __user_cap_header_struct header;
11654         struct __user_cap_data_struct data[2];
11655         struct __user_cap_data_struct *dataptr = NULL;
11656         int i, target_datalen;
11657         int data_items = 1;
11658 
11659         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11660             return -TARGET_EFAULT;
11661         }
11662         header.version = tswap32(target_header->version);
11663         header.pid = tswap32(target_header->pid);
11664 
11665         if (header.version != _LINUX_CAPABILITY_VERSION) {
11666             /* Versions 2 and up take a pointer to two user_data structs */
11667             data_items = 2;
11668         }
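        /* Versions 2 and 3 of the capability ABI carry two 32-bit sets per
         * field so that 64 capability bits fit; version 1 has only one.
         */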
11669 
11670         target_datalen = sizeof(*target_data) * data_items;
11671 
11672         if (arg2) {
11673             if (num == TARGET_NR_capget) {
11674                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11675             } else {
11676                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11677             }
11678             if (!target_data) {
11679                 unlock_user_struct(target_header, arg1, 0);
11680                 return -TARGET_EFAULT;
11681             }
11682 
11683             if (num == TARGET_NR_capset) {
11684                 for (i = 0; i < data_items; i++) {
11685                     data[i].effective = tswap32(target_data[i].effective);
11686                     data[i].permitted = tswap32(target_data[i].permitted);
11687                     data[i].inheritable = tswap32(target_data[i].inheritable);
11688                 }
11689             }
11690 
11691             dataptr = data;
11692         }
11693 
11694         if (num == TARGET_NR_capget) {
11695             ret = get_errno(capget(&header, dataptr));
11696         } else {
11697             ret = get_errno(capset(&header, dataptr));
11698         }
11699 
11700         /* The kernel always updates version for both capget and capset */
11701         target_header->version = tswap32(header.version);
11702         unlock_user_struct(target_header, arg1, 1);
11703 
11704         if (arg2) {
11705             if (num == TARGET_NR_capget) {
11706                 for (i = 0; i < data_items; i++) {
11707                     target_data[i].effective = tswap32(data[i].effective);
11708                     target_data[i].permitted = tswap32(data[i].permitted);
11709                     target_data[i].inheritable = tswap32(data[i].inheritable);
11710                 }
11711                 unlock_user(target_data, arg2, target_datalen);
11712             } else {
11713                 unlock_user(target_data, arg2, 0);
11714             }
11715         }
11716         return ret;
11717     }
11718     case TARGET_NR_sigaltstack:
11719         return do_sigaltstack(arg1, arg2, cpu_env);
11720 
11721 #ifdef CONFIG_SENDFILE
11722 #ifdef TARGET_NR_sendfile
11723     case TARGET_NR_sendfile:
11724     {
11725         off_t *offp = NULL;
11726         off_t off;
11727         if (arg3) {
11728             ret = get_user_sal(off, arg3);
11729             if (is_error(ret)) {
11730                 return ret;
11731             }
11732             offp = &off;
11733         }
11734         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11735         if (!is_error(ret) && arg3) {
11736             abi_long ret2 = put_user_sal(off, arg3);
11737             if (is_error(ret2)) {
11738                 ret = ret2;
11739             }
11740         }
11741         return ret;
11742     }
11743 #endif
11744 #ifdef TARGET_NR_sendfile64
11745     case TARGET_NR_sendfile64:
11746     {
11747         off_t *offp = NULL;
11748         off_t off;
11749         if (arg3) {
11750             ret = get_user_s64(off, arg3);
11751             if (is_error(ret)) {
11752                 return ret;
11753             }
11754             offp = &off;
11755         }
11756         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11757         if (!is_error(ret) && arg3) {
11758             abi_long ret2 = put_user_s64(off, arg3);
11759             if (is_error(ret2)) {
11760                 ret = ret2;
11761             }
11762         }
11763         return ret;
11764     }
11765 #endif
11766 #endif
11767 #ifdef TARGET_NR_vfork
11768     case TARGET_NR_vfork:
11769         return get_errno(do_fork(cpu_env,
11770                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11771                          0, 0, 0, 0));
11772 #endif
11773 #ifdef TARGET_NR_ugetrlimit
11774     case TARGET_NR_ugetrlimit:
11775     {
11776         struct rlimit rlim;
11777         int resource = target_to_host_resource(arg1);
11778         ret = get_errno(getrlimit(resource, &rlim));
11779         if (!is_error(ret)) {
11780             struct target_rlimit *target_rlim;
11781             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11782                 return -TARGET_EFAULT;
11783             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11784             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11785             unlock_user_struct(target_rlim, arg2, 1);
11786         }
11787         return ret;
11788     }
11789 #endif
11790 #ifdef TARGET_NR_truncate64
11791     case TARGET_NR_truncate64:
11792         if (!(p = lock_user_string(arg1)))
11793             return -TARGET_EFAULT;
11794         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11795         unlock_user(p, arg1, 0);
11796         return ret;
11797 #endif
11798 #ifdef TARGET_NR_ftruncate64
11799     case TARGET_NR_ftruncate64:
11800         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11801 #endif
11802 #ifdef TARGET_NR_stat64
11803     case TARGET_NR_stat64:
11804         if (!(p = lock_user_string(arg1))) {
11805             return -TARGET_EFAULT;
11806         }
11807         ret = get_errno(stat(path(p), &st));
11808         unlock_user(p, arg1, 0);
11809         if (!is_error(ret))
11810             ret = host_to_target_stat64(cpu_env, arg2, &st);
11811         return ret;
11812 #endif
11813 #ifdef TARGET_NR_lstat64
11814     case TARGET_NR_lstat64:
11815         if (!(p = lock_user_string(arg1))) {
11816             return -TARGET_EFAULT;
11817         }
11818         ret = get_errno(lstat(path(p), &st));
11819         unlock_user(p, arg1, 0);
11820         if (!is_error(ret))
11821             ret = host_to_target_stat64(cpu_env, arg2, &st);
11822         return ret;
11823 #endif
11824 #ifdef TARGET_NR_fstat64
11825     case TARGET_NR_fstat64:
11826         ret = get_errno(fstat(arg1, &st));
11827         if (!is_error(ret))
11828             ret = host_to_target_stat64(cpu_env, arg2, &st);
11829         return ret;
11830 #endif
11831 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11832 #ifdef TARGET_NR_fstatat64
11833     case TARGET_NR_fstatat64:
11834 #endif
11835 #ifdef TARGET_NR_newfstatat
11836     case TARGET_NR_newfstatat:
11837 #endif
11838         if (!(p = lock_user_string(arg2))) {
11839             return -TARGET_EFAULT;
11840         }
11841         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11842         unlock_user(p, arg2, 0);
11843         if (!is_error(ret))
11844             ret = host_to_target_stat64(cpu_env, arg3, &st);
11845         return ret;
11846 #endif
11847 #if defined(TARGET_NR_statx)
11848     case TARGET_NR_statx:
11849         {
11850             struct target_statx *target_stx;
11851             int dirfd = arg1;
11852             int flags = arg3;
11853 
11854             p = lock_user_string(arg2);
11855             if (p == NULL) {
11856                 return -TARGET_EFAULT;
11857             }
11858 #if defined(__NR_statx)
11859             {
11860                 /*
11861                  * It is assumed that struct statx is architecture independent.
11862                  */
11863                 struct target_statx host_stx;
11864                 int mask = arg4;
11865 
11866                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11867                 if (!is_error(ret)) {
11868                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11869                         unlock_user(p, arg2, 0);
11870                         return -TARGET_EFAULT;
11871                     }
11872                 }
11873 
11874                 if (ret != -TARGET_ENOSYS) {
11875                     unlock_user(p, arg2, 0);
11876                     return ret;
11877                 }
11878             }
11879 #endif
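            /* No usable statx() on the host (or it returned ENOSYS): fall
             * back to fstatat() and fill in the statx fields that struct
             * stat can provide.
             */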
11880             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11881             unlock_user(p, arg2, 0);
11882 
11883             if (!is_error(ret)) {
11884                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11885                     return -TARGET_EFAULT;
11886                 }
11887                 memset(target_stx, 0, sizeof(*target_stx));
11888                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11889                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11890                 __put_user(st.st_ino, &target_stx->stx_ino);
11891                 __put_user(st.st_mode, &target_stx->stx_mode);
11892                 __put_user(st.st_uid, &target_stx->stx_uid);
11893                 __put_user(st.st_gid, &target_stx->stx_gid);
11894                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11895                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11896                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11897                 __put_user(st.st_size, &target_stx->stx_size);
11898                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11899                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11900                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11901                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11902                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11903                 unlock_user_struct(target_stx, arg5, 1);
11904             }
11905         }
11906         return ret;
11907 #endif
11908 #ifdef TARGET_NR_lchown
11909     case TARGET_NR_lchown:
11910         if (!(p = lock_user_string(arg1)))
11911             return -TARGET_EFAULT;
11912         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11913         unlock_user(p, arg1, 0);
11914         return ret;
11915 #endif
11916 #ifdef TARGET_NR_getuid
11917     case TARGET_NR_getuid:
11918         return get_errno(high2lowuid(getuid()));
11919 #endif
11920 #ifdef TARGET_NR_getgid
11921     case TARGET_NR_getgid:
11922         return get_errno(high2lowgid(getgid()));
11923 #endif
11924 #ifdef TARGET_NR_geteuid
11925     case TARGET_NR_geteuid:
11926         return get_errno(high2lowuid(geteuid()));
11927 #endif
11928 #ifdef TARGET_NR_getegid
11929     case TARGET_NR_getegid:
11930         return get_errno(high2lowgid(getegid()));
11931 #endif
11932     case TARGET_NR_setreuid:
11933         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11934     case TARGET_NR_setregid:
11935         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11936     case TARGET_NR_getgroups:
11937         { /* the same code as for TARGET_NR_getgroups32 */
11938             int gidsetsize = arg1;
11939             target_id *target_grouplist;
11940             g_autofree gid_t *grouplist = NULL;
11941             int i;
11942 
11943             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11944                 return -TARGET_EINVAL;
11945             }
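            /* A gidsetsize of 0 only queries the number of supplementary
             * groups, so no buffer is needed.
             */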
11946             if (gidsetsize > 0) {
11947                 grouplist = g_try_new(gid_t, gidsetsize);
11948                 if (!grouplist) {
11949                     return -TARGET_ENOMEM;
11950                 }
11951             }
11952             ret = get_errno(getgroups(gidsetsize, grouplist));
11953             if (!is_error(ret) && gidsetsize > 0) {
11954                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11955                                              gidsetsize * sizeof(target_id), 0);
11956                 if (!target_grouplist) {
11957                     return -TARGET_EFAULT;
11958                 }
11959                 for (i = 0; i < ret; i++) {
11960                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11961                 }
11962                 unlock_user(target_grouplist, arg2,
11963                             gidsetsize * sizeof(target_id));
11964             }
11965             return ret;
11966         }
11967     case TARGET_NR_setgroups:
11968         { /* the same code as for TARGET_NR_setgroups32 */
11969             int gidsetsize = arg1;
11970             target_id *target_grouplist;
11971             g_autofree gid_t *grouplist = NULL;
11972             int i;
11973 
11974             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11975                 return -TARGET_EINVAL;
11976             }
11977             if (gidsetsize > 0) {
11978                 grouplist = g_try_new(gid_t, gidsetsize);
11979                 if (!grouplist) {
11980                     return -TARGET_ENOMEM;
11981                 }
11982                 target_grouplist = lock_user(VERIFY_READ, arg2,
11983                                              gidsetsize * sizeof(target_id), 1);
11984                 if (!target_grouplist) {
11985                     return -TARGET_EFAULT;
11986                 }
11987                 for (i = 0; i < gidsetsize; i++) {
11988                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11989                 }
11990                 unlock_user(target_grouplist, arg2,
11991                             gidsetsize * sizeof(target_id));
11992             }
11993             return get_errno(sys_setgroups(gidsetsize, grouplist));
11994         }
11995     case TARGET_NR_fchown:
11996         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11997 #if defined(TARGET_NR_fchownat)
11998     case TARGET_NR_fchownat:
11999         if (!(p = lock_user_string(arg2)))
12000             return -TARGET_EFAULT;
12001         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12002                                  low2highgid(arg4), arg5));
12003         unlock_user(p, arg2, 0);
12004         return ret;
12005 #endif
12006 #ifdef TARGET_NR_setresuid
12007     case TARGET_NR_setresuid:
12008         return get_errno(sys_setresuid(low2highuid(arg1),
12009                                        low2highuid(arg2),
12010                                        low2highuid(arg3)));
12011 #endif
12012 #ifdef TARGET_NR_getresuid
12013     case TARGET_NR_getresuid:
12014         {
12015             uid_t ruid, euid, suid;
12016             ret = get_errno(getresuid(&ruid, &euid, &suid));
12017             if (!is_error(ret)) {
12018                 if (put_user_id(high2lowuid(ruid), arg1)
12019                     || put_user_id(high2lowuid(euid), arg2)
12020                     || put_user_id(high2lowuid(suid), arg3))
12021                     return -TARGET_EFAULT;
12022             }
12023         }
12024         return ret;
12025 #endif
12026 #ifdef TARGET_NR_setresgid
12027     case TARGET_NR_setresgid:
12028         return get_errno(sys_setresgid(low2highgid(arg1),
12029                                        low2highgid(arg2),
12030                                        low2highgid(arg3)));
12031 #endif
12032 #ifdef TARGET_NR_getresgid
12033     case TARGET_NR_getresgid:
12034         {
12035             gid_t rgid, egid, sgid;
12036             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12037             if (!is_error(ret)) {
12038                 if (put_user_id(high2lowgid(rgid), arg1)
12039                     || put_user_id(high2lowgid(egid), arg2)
12040                     || put_user_id(high2lowgid(sgid), arg3))
12041                     return -TARGET_EFAULT;
12042             }
12043         }
12044         return ret;
12045 #endif
12046 #ifdef TARGET_NR_chown
12047     case TARGET_NR_chown:
12048         if (!(p = lock_user_string(arg1)))
12049             return -TARGET_EFAULT;
12050         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12051         unlock_user(p, arg1, 0);
12052         return ret;
12053 #endif
12054     case TARGET_NR_setuid:
12055         return get_errno(sys_setuid(low2highuid(arg1)));
12056     case TARGET_NR_setgid:
12057         return get_errno(sys_setgid(low2highgid(arg1)));
12058     case TARGET_NR_setfsuid:
12059         return get_errno(setfsuid(arg1));
12060     case TARGET_NR_setfsgid:
12061         return get_errno(setfsgid(arg1));
12062 
12063 #ifdef TARGET_NR_lchown32
12064     case TARGET_NR_lchown32:
12065         if (!(p = lock_user_string(arg1)))
12066             return -TARGET_EFAULT;
12067         ret = get_errno(lchown(p, arg2, arg3));
12068         unlock_user(p, arg1, 0);
12069         return ret;
12070 #endif
12071 #ifdef TARGET_NR_getuid32
12072     case TARGET_NR_getuid32:
12073         return get_errno(getuid());
12074 #endif
12075 
12076 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12077     /* Alpha specific */
12078     case TARGET_NR_getxuid:
12079         {
12080             uid_t euid;
12081             euid = geteuid();
12082             cpu_env->ir[IR_A4] = euid;
12083         }
12084         return get_errno(getuid());
12085 #endif
12086 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12087     /* Alpha specific */
12088     case TARGET_NR_getxgid:
12089         {
12090             gid_t egid;
12091             egid = getegid();
12092             cpu_env->ir[IR_A4] = egid;
12093         }
12094         return get_errno(getgid());
12095 #endif
12096 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12097     /* Alpha specific */
12098     case TARGET_NR_osf_getsysinfo:
12099         ret = -TARGET_EOPNOTSUPP;
12100         switch (arg1) {
12101           case TARGET_GSI_IEEE_FP_CONTROL:
12102             {
12103                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12104                 uint64_t swcr = cpu_env->swcr;
12105 
12106                 swcr &= ~SWCR_STATUS_MASK;
12107                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12108 
12109                 if (put_user_u64(swcr, arg2))
12110                     return -TARGET_EFAULT;
12111                 ret = 0;
12112             }
12113             break;
12114 
12115           /* case GSI_IEEE_STATE_AT_SIGNAL:
12116              -- Not implemented in linux kernel.
12117              case GSI_UACPROC:
12118              -- Retrieves current unaligned access state; not much used.
12119              case GSI_PROC_TYPE:
12120              -- Retrieves implver information; surely not used.
12121              case GSI_GET_HWRPB:
12122              -- Grabs a copy of the HWRPB; surely not used.
12123           */
12124         }
12125         return ret;
12126 #endif
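    /*
     * Illustrative sketch (not part of the emulation): an Alpha guest
     * normally reaches GSI_IEEE_FP_CONTROL through glibc's
     * ieee_get_fp_control(), so a guest program along these lines receives
     * the value assembled above -- the saved trap-enable bits plus the
     * live status bits copied out of the FPCR:
     *
     *     #include <asm/fpu.h>                  // assumed guest-side header
     *     unsigned long swcr = ieee_get_fp_control();
     *     if (swcr & IEEE_STATUS_INE) {
     *         // an inexact result has been raised since the last clear
     *     }
     */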
12127 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12128     /* Alpha specific */
12129     case TARGET_NR_osf_setsysinfo:
12130         ret = -TARGET_EOPNOTSUPP;
12131         switch (arg1) {
12132           case TARGET_SSI_IEEE_FP_CONTROL:
12133             {
12134                 uint64_t swcr, fpcr;
12135 
12136                 if (get_user_u64(swcr, arg2)) {
12137                     return -TARGET_EFAULT;
12138                 }
12139 
12140                 /*
12141                  * The kernel calls swcr_update_status to update the
12142                  * status bits from the fpcr at every point that it
12143                  * could be queried.  Therefore, we store the status
12144                  * bits only in FPCR.
12145                  */
12146                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12147 
12148                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12149                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12150                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12151                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12152                 ret = 0;
12153             }
12154             break;
12155 
12156           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12157             {
12158                 uint64_t exc, fpcr, fex;
12159 
12160                 if (get_user_u64(exc, arg2)) {
12161                     return -TARGET_EFAULT;
12162                 }
12163                 exc &= SWCR_STATUS_MASK;
12164                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12165 
12166                 /* Old exceptions are not signaled.  */
12167                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12168                 fex = exc & ~fex;
12169                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12170                 fex &= (cpu_env)->swcr;
12171 
12172                 /* Update the hardware fpcr.  */
12173                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12174                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12175 
12176                 if (fex) {
12177                     int si_code = TARGET_FPE_FLTUNK;
12178                     target_siginfo_t info;
12179 
12180                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12181                         si_code = TARGET_FPE_FLTUND;
12182                     }
12183                     if (fex & SWCR_TRAP_ENABLE_INE) {
12184                         si_code = TARGET_FPE_FLTRES;
12185                     }
12186                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12187                         si_code = TARGET_FPE_FLTUND;
12188                     }
12189                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12190                         si_code = TARGET_FPE_FLTOVF;
12191                     }
12192                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12193                         si_code = TARGET_FPE_FLTDIV;
12194                     }
12195                     if (fex & SWCR_TRAP_ENABLE_INV) {
12196                         si_code = TARGET_FPE_FLTINV;
12197                     }
12198 
12199                     info.si_signo = SIGFPE;
12200                     info.si_errno = 0;
12201                     info.si_code = si_code;
12202                     info._sifields._sigfault._addr = (cpu_env)->pc;
12203                     queue_signal(cpu_env, info.si_signo,
12204                                  QEMU_SI_FAULT, &info);
12205                 }
12206                 ret = 0;
12207             }
12208             break;
12209 
12210           /* case SSI_NVPAIRS:
12211              -- Used with SSIN_UACPROC to enable unaligned accesses.
12212              case SSI_IEEE_STATE_AT_SIGNAL:
12213              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12214              -- Not implemented in linux kernel
12215           */
12216         }
12217         return ret;
12218 #endif
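    /*
     * Worked example for the SSI_IEEE_RAISE_EXCEPTION path above
     * (illustrative values, assuming only the inexact bits are involved):
     * if the guest raises just SWCR_STATUS_INE and had previously enabled
     * only the inexact trap, then
     *
     *     fex  = (exc & ~old_status) >> SWCR_STATUS_TO_EXCSUM_SHIFT;
     *     fex &= swcr;                       // trap-enable bits
     *
     * leaves fex non-zero, so SIGFPE is queued with si_code
     * TARGET_FPE_FLTRES; with that trap disabled, fex would be zero and
     * only the FPCR status bits would be updated.
     */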
12219 #ifdef TARGET_NR_osf_sigprocmask
12220     /* Alpha specific.  */
12221     case TARGET_NR_osf_sigprocmask:
12222         {
12223             abi_ulong mask;
12224             int how;
12225             sigset_t set, oldset;
12226 
12227             switch (arg1) {
12228             case TARGET_SIG_BLOCK:
12229                 how = SIG_BLOCK;
12230                 break;
12231             case TARGET_SIG_UNBLOCK:
12232                 how = SIG_UNBLOCK;
12233                 break;
12234             case TARGET_SIG_SETMASK:
12235                 how = SIG_SETMASK;
12236                 break;
12237             default:
12238                 return -TARGET_EINVAL;
12239             }
12240             mask = arg2;
12241             target_to_host_old_sigset(&set, &mask);
12242             ret = do_sigprocmask(how, &set, &oldset);
12243             if (!ret) {
12244                 host_to_target_old_sigset(&mask, &oldset);
12245                 ret = mask;
12246             }
12247         }
12248         return ret;
12249 #endif
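    /*
     * Illustrative guest-side shape of this legacy Alpha call (assumption:
     * the guest invokes the syscall directly or via an old libc): the new
     * mask is passed by value and the old mask comes back in the return
     * value rather than through a pointer, which is why the converted
     * oldset is placed in ret above:
     *
     *     old = syscall(__NR_osf_sigprocmask, SIG_BLOCK, new_mask);
     */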
12250 
12251 #ifdef TARGET_NR_getgid32
12252     case TARGET_NR_getgid32:
12253         return get_errno(getgid());
12254 #endif
12255 #ifdef TARGET_NR_geteuid32
12256     case TARGET_NR_geteuid32:
12257         return get_errno(geteuid());
12258 #endif
12259 #ifdef TARGET_NR_getegid32
12260     case TARGET_NR_getegid32:
12261         return get_errno(getegid());
12262 #endif
12263 #ifdef TARGET_NR_setreuid32
12264     case TARGET_NR_setreuid32:
12265         return get_errno(setreuid(arg1, arg2));
12266 #endif
12267 #ifdef TARGET_NR_setregid32
12268     case TARGET_NR_setregid32:
12269         return get_errno(setregid(arg1, arg2));
12270 #endif
12271 #ifdef TARGET_NR_getgroups32
12272     case TARGET_NR_getgroups32:
12273         { /* the same code as for TARGET_NR_getgroups */
12274             int gidsetsize = arg1;
12275             uint32_t *target_grouplist;
12276             g_autofree gid_t *grouplist = NULL;
12277             int i;
12278 
12279             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12280                 return -TARGET_EINVAL;
12281             }
12282             if (gidsetsize > 0) {
12283                 grouplist = g_try_new(gid_t, gidsetsize);
12284                 if (!grouplist) {
12285                     return -TARGET_ENOMEM;
12286                 }
12287             }
12288             ret = get_errno(getgroups(gidsetsize, grouplist));
12289             if (!is_error(ret) && gidsetsize > 0) {
12290                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12291                                              gidsetsize * 4, 0);
12292                 if (!target_grouplist) {
12293                     return -TARGET_EFAULT;
12294                 }
12295                 for (i = 0; i < ret; i++) {
12296                     target_grouplist[i] = tswap32(grouplist[i]);
12297                 }
12298                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12299             }
12300             return ret;
12301         }
12302 #endif
12303 #ifdef TARGET_NR_setgroups32
12304     case TARGET_NR_setgroups32:
12305         { /* the same code as for TARGET_NR_setgroups */
12306             int gidsetsize = arg1;
12307             uint32_t *target_grouplist;
12308             g_autofree gid_t *grouplist = NULL;
12309             int i;
12310 
12311             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12312                 return -TARGET_EINVAL;
12313             }
12314             if (gidsetsize > 0) {
12315                 grouplist = g_try_new(gid_t, gidsetsize);
12316                 if (!grouplist) {
12317                     return -TARGET_ENOMEM;
12318                 }
12319                 target_grouplist = lock_user(VERIFY_READ, arg2,
12320                                              gidsetsize * 4, 1);
12321                 if (!target_grouplist) {
12322                     return -TARGET_EFAULT;
12323                 }
12324                 for (i = 0; i < gidsetsize; i++) {
12325                     grouplist[i] = tswap32(target_grouplist[i]);
12326                 }
12327                 unlock_user(target_grouplist, arg2, 0);
12328             }
12329             return get_errno(sys_setgroups(gidsetsize, grouplist));
12330         }
12331 #endif
12332 #ifdef TARGET_NR_fchown32
12333     case TARGET_NR_fchown32:
12334         return get_errno(fchown(arg1, arg2, arg3));
12335 #endif
12336 #ifdef TARGET_NR_setresuid32
12337     case TARGET_NR_setresuid32:
12338         return get_errno(sys_setresuid(arg1, arg2, arg3));
12339 #endif
12340 #ifdef TARGET_NR_getresuid32
12341     case TARGET_NR_getresuid32:
12342         {
12343             uid_t ruid, euid, suid;
12344             ret = get_errno(getresuid(&ruid, &euid, &suid));
12345             if (!is_error(ret)) {
12346                 if (put_user_u32(ruid, arg1)
12347                     || put_user_u32(euid, arg2)
12348                     || put_user_u32(suid, arg3))
12349                     return -TARGET_EFAULT;
12350             }
12351         }
12352         return ret;
12353 #endif
12354 #ifdef TARGET_NR_setresgid32
12355     case TARGET_NR_setresgid32:
12356         return get_errno(sys_setresgid(arg1, arg2, arg3));
12357 #endif
12358 #ifdef TARGET_NR_getresgid32
12359     case TARGET_NR_getresgid32:
12360         {
12361             gid_t rgid, egid, sgid;
12362             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12363             if (!is_error(ret)) {
12364                 if (put_user_u32(rgid, arg1)
12365                     || put_user_u32(egid, arg2)
12366                     || put_user_u32(sgid, arg3))
12367                     return -TARGET_EFAULT;
12368             }
12369         }
12370         return ret;
12371 #endif
12372 #ifdef TARGET_NR_chown32
12373     case TARGET_NR_chown32:
12374         if (!(p = lock_user_string(arg1)))
12375             return -TARGET_EFAULT;
12376         ret = get_errno(chown(p, arg2, arg3));
12377         unlock_user(p, arg1, 0);
12378         return ret;
12379 #endif
12380 #ifdef TARGET_NR_setuid32
12381     case TARGET_NR_setuid32:
12382         return get_errno(sys_setuid(arg1));
12383 #endif
12384 #ifdef TARGET_NR_setgid32
12385     case TARGET_NR_setgid32:
12386         return get_errno(sys_setgid(arg1));
12387 #endif
12388 #ifdef TARGET_NR_setfsuid32
12389     case TARGET_NR_setfsuid32:
12390         return get_errno(setfsuid(arg1));
12391 #endif
12392 #ifdef TARGET_NR_setfsgid32
12393     case TARGET_NR_setfsgid32:
12394         return get_errno(setfsgid(arg1));
12395 #endif
12396 #ifdef TARGET_NR_mincore
12397     case TARGET_NR_mincore:
12398         {
12399             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12400             if (!a) {
12401                 return -TARGET_ENOMEM;
12402             }
12403             p = lock_user_string(arg3);
12404             if (!p) {
12405                 ret = -TARGET_EFAULT;
12406             } else {
12407                 ret = get_errno(mincore(a, arg2, p));
12408                 unlock_user(p, arg3, ret);
12409             }
12410             unlock_user(a, arg1, 0);
12411         }
12412         return ret;
12413 #endif
12414 #ifdef TARGET_NR_arm_fadvise64_64
12415     case TARGET_NR_arm_fadvise64_64:
12416         /* arm_fadvise64_64 looks like fadvise64_64 but
12417          * with different argument order: fd, advice, offset, len
12418          * rather than the usual fd, offset, len, advice.
12419          * Note that offset and len are both 64-bit so appear as
12420          * pairs of 32-bit registers.
12421          */
12422         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12423                             target_offset64(arg5, arg6), arg2);
12424         return -host_to_target_errno(ret);
12425 #endif
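    /*
     * Minimal sketch of the register-pair reassembly relied on above
     * (target_offset64() is defined elsewhere in this file; this only
     * illustrates the idea and ignores the ABIs that swap the pair on
     * big-endian targets):
     *
     *     static uint64_t example_offset64(uint32_t word0, uint32_t word1)
     *     {
     *         return ((uint64_t)word1 << 32) | word0;
     *     }
     *
     * i.e. each 64-bit offset or length occupies two consecutive 32-bit
     * syscall arguments.
     */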
12426 
12427 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12428 
12429 #ifdef TARGET_NR_fadvise64_64
12430     case TARGET_NR_fadvise64_64:
12431 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12432         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12433         ret = arg2;
12434         arg2 = arg3;
12435         arg3 = arg4;
12436         arg4 = arg5;
12437         arg5 = arg6;
12438         arg6 = ret;
12439 #else
12440         /* 6 args: fd, offset (high, low), len (high, low), advice */
12441         if (regpairs_aligned(cpu_env, num)) {
12442             /* offset is in (3,4), len in (5,6) and advice in 7 */
12443             arg2 = arg3;
12444             arg3 = arg4;
12445             arg4 = arg5;
12446             arg5 = arg6;
12447             arg6 = arg7;
12448         }
12449 #endif
12450         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12451                             target_offset64(arg4, arg5), arg6);
12452         return -host_to_target_errno(ret);
12453 #endif
12454 
12455 #ifdef TARGET_NR_fadvise64
12456     case TARGET_NR_fadvise64:
12457         /* 5 args: fd, offset (high, low), len, advice */
12458         if (regpairs_aligned(cpu_env, num)) {
12459             /* offset is in (3,4), len in 5 and advice in 6 */
12460             arg2 = arg3;
12461             arg3 = arg4;
12462             arg4 = arg5;
12463             arg5 = arg6;
12464         }
12465         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12466         return -host_to_target_errno(ret);
12467 #endif
12468 
12469 #else /* not a 32-bit ABI */
12470 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12471 #ifdef TARGET_NR_fadvise64_64
12472     case TARGET_NR_fadvise64_64:
12473 #endif
12474 #ifdef TARGET_NR_fadvise64
12475     case TARGET_NR_fadvise64:
12476 #endif
12477 #ifdef TARGET_S390X
12478         switch (arg4) {
12479         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12480         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12481         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12482         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12483         default: break;
12484         }
12485 #endif
12486         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12487 #endif
12488 #endif /* end of 64-bit ABI fadvise handling */
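    /*
     * Worked example of the regpairs_aligned() shuffling above
     * (illustrative): on an ABI that requires 64-bit register pairs to
     * start on an even-numbered register, the guest's
     * fadvise64_64(fd, offset, len, advice) arrives as
     *
     *     arg1 = fd, arg2 = <pad>, arg3/arg4 = offset,
     *     arg5/arg6 = len, arg7 = advice
     *
     * so shifting every argument down by one slot restores the generic
     * fd, offset(hi,lo), len(hi,lo), advice layout the common code expects.
     */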
12489 
12490 #ifdef TARGET_NR_madvise
12491     case TARGET_NR_madvise:
12492         return target_madvise(arg1, arg2, arg3);
12493 #endif
12494 #ifdef TARGET_NR_fcntl64
12495     case TARGET_NR_fcntl64:
12496     {
12497         int cmd;
12498         struct flock fl;
12499         from_flock64_fn *copyfrom = copy_from_user_flock64;
12500         to_flock64_fn *copyto = copy_to_user_flock64;
12501 
12502 #ifdef TARGET_ARM
12503         if (!cpu_env->eabi) {
12504             copyfrom = copy_from_user_oabi_flock64;
12505             copyto = copy_to_user_oabi_flock64;
12506         }
12507 #endif
12508 
12509         cmd = target_to_host_fcntl_cmd(arg2);
12510         if (cmd == -TARGET_EINVAL) {
12511             return cmd;
12512         }
12513 
12514         switch (arg2) {
12515         case TARGET_F_GETLK64:
12516             ret = copyfrom(&fl, arg3);
12517             if (ret) {
12518                 break;
12519             }
12520             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12521             if (ret == 0) {
12522                 ret = copyto(arg3, &fl);
12523             }
12524             break;
12525 
12526         case TARGET_F_SETLK64:
12527         case TARGET_F_SETLKW64:
12528             ret = copyfrom(&fl, arg3);
12529             if (ret) {
12530                 break;
12531             }
12532             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12533             break;
12534         default:
12535             ret = do_fcntl(arg1, arg2, arg3);
12536             break;
12537         }
12538         return ret;
12539     }
12540 #endif
12541 #ifdef TARGET_NR_cacheflush
12542     case TARGET_NR_cacheflush:
12543         /* self-modifying code is handled automatically, so nothing needed */
12544         return 0;
12545 #endif
12546 #ifdef TARGET_NR_getpagesize
12547     case TARGET_NR_getpagesize:
12548         return TARGET_PAGE_SIZE;
12549 #endif
12550     case TARGET_NR_gettid:
12551         return get_errno(sys_gettid());
12552 #ifdef TARGET_NR_readahead
12553     case TARGET_NR_readahead:
12554 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12555         if (regpairs_aligned(cpu_env, num)) {
12556             arg2 = arg3;
12557             arg3 = arg4;
12558             arg4 = arg5;
12559         }
12560         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12561 #else
12562         ret = get_errno(readahead(arg1, arg2, arg3));
12563 #endif
12564         return ret;
12565 #endif
12566 #ifdef CONFIG_ATTR
12567 #ifdef TARGET_NR_setxattr
12568     case TARGET_NR_listxattr:
12569     case TARGET_NR_llistxattr:
12570     {
12571         void *b = 0;
12572         if (arg2) {
12573             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12574             if (!b) {
12575                 return -TARGET_EFAULT;
12576             }
12577         }
12578         p = lock_user_string(arg1);
12579         if (p) {
12580             if (num == TARGET_NR_listxattr) {
12581                 ret = get_errno(listxattr(p, b, arg3));
12582             } else {
12583                 ret = get_errno(llistxattr(p, b, arg3));
12584             }
12585         } else {
12586             ret = -TARGET_EFAULT;
12587         }
12588         unlock_user(p, arg1, 0);
12589         unlock_user(b, arg2, arg3);
12590         return ret;
12591     }
12592     case TARGET_NR_flistxattr:
12593     {
12594         void *b = 0;
12595         if (arg2) {
12596             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12597             if (!b) {
12598                 return -TARGET_EFAULT;
12599             }
12600         }
12601         ret = get_errno(flistxattr(arg1, b, arg3));
12602         unlock_user(b, arg2, arg3);
12603         return ret;
12604     }
12605     case TARGET_NR_setxattr:
12606     case TARGET_NR_lsetxattr:
12607         {
12608             void *n, *v = 0;
12609             if (arg3) {
12610                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12611                 if (!v) {
12612                     return -TARGET_EFAULT;
12613                 }
12614             }
12615             p = lock_user_string(arg1);
12616             n = lock_user_string(arg2);
12617             if (p && n) {
12618                 if (num == TARGET_NR_setxattr) {
12619                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12620                 } else {
12621                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12622                 }
12623             } else {
12624                 ret = -TARGET_EFAULT;
12625             }
12626             unlock_user(p, arg1, 0);
12627             unlock_user(n, arg2, 0);
12628             unlock_user(v, arg3, 0);
12629         }
12630         return ret;
12631     case TARGET_NR_fsetxattr:
12632         {
12633             void *n, *v = 0;
12634             if (arg3) {
12635                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12636                 if (!v) {
12637                     return -TARGET_EFAULT;
12638                 }
12639             }
12640             n = lock_user_string(arg2);
12641             if (n) {
12642                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12643             } else {
12644                 ret = -TARGET_EFAULT;
12645             }
12646             unlock_user(n, arg2, 0);
12647             unlock_user(v, arg3, 0);
12648         }
12649         return ret;
12650     case TARGET_NR_getxattr:
12651     case TARGET_NR_lgetxattr:
12652         {
12653             void *n, *v = 0;
12654             if (arg3) {
12655                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12656                 if (!v) {
12657                     return -TARGET_EFAULT;
12658                 }
12659             }
12660             p = lock_user_string(arg1);
12661             n = lock_user_string(arg2);
12662             if (p && n) {
12663                 if (num == TARGET_NR_getxattr) {
12664                     ret = get_errno(getxattr(p, n, v, arg4));
12665                 } else {
12666                     ret = get_errno(lgetxattr(p, n, v, arg4));
12667                 }
12668             } else {
12669                 ret = -TARGET_EFAULT;
12670             }
12671             unlock_user(p, arg1, 0);
12672             unlock_user(n, arg2, 0);
12673             unlock_user(v, arg3, arg4);
12674         }
12675         return ret;
12676     case TARGET_NR_fgetxattr:
12677         {
12678             void *n, *v = 0;
12679             if (arg3) {
12680                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12681                 if (!v) {
12682                     return -TARGET_EFAULT;
12683                 }
12684             }
12685             n = lock_user_string(arg2);
12686             if (n) {
12687                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12688             } else {
12689                 ret = -TARGET_EFAULT;
12690             }
12691             unlock_user(n, arg2, 0);
12692             unlock_user(v, arg3, arg4);
12693         }
12694         return ret;
12695     case TARGET_NR_removexattr:
12696     case TARGET_NR_lremovexattr:
12697         {
12698             void *n;
12699             p = lock_user_string(arg1);
12700             n = lock_user_string(arg2);
12701             if (p && n) {
12702                 if (num == TARGET_NR_removexattr) {
12703                     ret = get_errno(removexattr(p, n));
12704                 } else {
12705                     ret = get_errno(lremovexattr(p, n));
12706                 }
12707             } else {
12708                 ret = -TARGET_EFAULT;
12709             }
12710             unlock_user(p, arg1, 0);
12711             unlock_user(n, arg2, 0);
12712         }
12713         return ret;
12714     case TARGET_NR_fremovexattr:
12715         {
12716             void *n;
12717             n = lock_user_string(arg2);
12718             if (n) {
12719                 ret = get_errno(fremovexattr(arg1, n));
12720             } else {
12721                 ret = -TARGET_EFAULT;
12722             }
12723             unlock_user(n, arg2, 0);
12724         }
12725         return ret;
12726 #endif
12727 #endif /* CONFIG_ATTR */
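    /*
     * Illustrative guest-side usage of the xattr cases above (standard
     * listxattr(2) semantics, which the wrappers simply forward): guests
     * commonly probe with a zero-length buffer first, so arg2 may
     * legitimately be 0, which is why the buffer is only locked when it
     * is non-zero:
     *
     *     ssize_t need = listxattr("somefile", NULL, 0);   // size probe
     *     char *buf = malloc(need);
     *     listxattr("somefile", buf, need);                // actual fetch
     */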
12728 #ifdef TARGET_NR_set_thread_area
12729     case TARGET_NR_set_thread_area:
12730 #if defined(TARGET_MIPS)
12731       cpu_env->active_tc.CP0_UserLocal = arg1;
12732       return 0;
12733 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12734       return do_set_thread_area(cpu_env, arg1);
12735 #elif defined(TARGET_M68K)
12736       {
12737           TaskState *ts = get_task_state(cpu);
12738           ts->tp_value = arg1;
12739           return 0;
12740       }
12741 #else
12742       return -TARGET_ENOSYS;
12743 #endif
12744 #endif
12745 #ifdef TARGET_NR_get_thread_area
12746     case TARGET_NR_get_thread_area:
12747 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12748         return do_get_thread_area(cpu_env, arg1);
12749 #elif defined(TARGET_M68K)
12750         {
12751             TaskState *ts = get_task_state(cpu);
12752             return ts->tp_value;
12753         }
12754 #else
12755         return -TARGET_ENOSYS;
12756 #endif
12757 #endif
12758 #ifdef TARGET_NR_getdomainname
12759     case TARGET_NR_getdomainname:
12760         return -TARGET_ENOSYS;
12761 #endif
12762 
12763 #ifdef TARGET_NR_clock_settime
12764     case TARGET_NR_clock_settime:
12765     {
12766         struct timespec ts;
12767 
12768         ret = target_to_host_timespec(&ts, arg2);
12769         if (!is_error(ret)) {
12770             ret = get_errno(clock_settime(arg1, &ts));
12771         }
12772         return ret;
12773     }
12774 #endif
12775 #ifdef TARGET_NR_clock_settime64
12776     case TARGET_NR_clock_settime64:
12777     {
12778         struct timespec ts;
12779 
12780         ret = target_to_host_timespec64(&ts, arg2);
12781         if (!is_error(ret)) {
12782             ret = get_errno(clock_settime(arg1, &ts));
12783         }
12784         return ret;
12785     }
12786 #endif
12787 #ifdef TARGET_NR_clock_gettime
12788     case TARGET_NR_clock_gettime:
12789     {
12790         struct timespec ts;
12791         ret = get_errno(clock_gettime(arg1, &ts));
12792         if (!is_error(ret)) {
12793             ret = host_to_target_timespec(arg2, &ts);
12794         }
12795         return ret;
12796     }
12797 #endif
12798 #ifdef TARGET_NR_clock_gettime64
12799     case TARGET_NR_clock_gettime64:
12800     {
12801         struct timespec ts;
12802         ret = get_errno(clock_gettime(arg1, &ts));
12803         if (!is_error(ret)) {
12804             ret = host_to_target_timespec64(arg2, &ts);
12805         }
12806         return ret;
12807     }
12808 #endif
12809 #ifdef TARGET_NR_clock_getres
12810     case TARGET_NR_clock_getres:
12811     {
12812         struct timespec ts;
12813         ret = get_errno(clock_getres(arg1, &ts));
12814         if (!is_error(ret)) {
12815             host_to_target_timespec(arg2, &ts);
12816         }
12817         return ret;
12818     }
12819 #endif
12820 #ifdef TARGET_NR_clock_getres_time64
12821     case TARGET_NR_clock_getres_time64:
12822     {
12823         struct timespec ts;
12824         ret = get_errno(clock_getres(arg1, &ts));
12825         if (!is_error(ret)) {
12826             host_to_target_timespec64(arg2, &ts);
12827         }
12828         return ret;
12829     }
12830 #endif
12831 #ifdef TARGET_NR_clock_nanosleep
12832     case TARGET_NR_clock_nanosleep:
12833     {
12834         struct timespec ts;
12835         if (target_to_host_timespec(&ts, arg3)) {
12836             return -TARGET_EFAULT;
12837         }
12838         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12839                                              &ts, arg4 ? &ts : NULL));
12840         /*
12841          * If the call is interrupted by a signal handler, it fails
12842          * with -TARGET_EINTR; if arg4 is non-NULL and arg2 is not
12843          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12844          */
12845         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12846             host_to_target_timespec(arg4, &ts)) {
12847               return -TARGET_EFAULT;
12848         }
12849 
12850         return ret;
12851     }
12852 #endif
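    /*
     * Illustrative guest-side pattern served by the case above (standard
     * clock_nanosleep(2) semantics; the names are the guest's, not QEMU's):
     *
     *     struct timespec req = { .tv_sec = 5 }, rem;
     *     while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
     *         req = rem;            // resume with the unslept remainder
     *     }
     *
     * which is why the remainder is copied back only for the relative,
     * interrupted case.
     */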
12853 #ifdef TARGET_NR_clock_nanosleep_time64
12854     case TARGET_NR_clock_nanosleep_time64:
12855     {
12856         struct timespec ts;
12857 
12858         if (target_to_host_timespec64(&ts, arg3)) {
12859             return -TARGET_EFAULT;
12860         }
12861 
12862         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12863                                              &ts, arg4 ? &ts : NULL));
12864 
12865         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12866             host_to_target_timespec64(arg4, &ts)) {
12867             return -TARGET_EFAULT;
12868         }
12869         return ret;
12870     }
12871 #endif
12872 
12873 #if defined(TARGET_NR_set_tid_address)
12874     case TARGET_NR_set_tid_address:
12875     {
12876         TaskState *ts = get_task_state(cpu);
12877         ts->child_tidptr = arg1;
12878         /* do not call host set_tid_address() syscall, instead return tid() */
12879         return get_errno(sys_gettid());
12880     }
12881 #endif
12882 
12883     case TARGET_NR_tkill:
12884         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12885 
12886     case TARGET_NR_tgkill:
12887         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12888                          target_to_host_signal(arg3)));
12889 
12890 #ifdef TARGET_NR_set_robust_list
12891     case TARGET_NR_set_robust_list:
12892     case TARGET_NR_get_robust_list:
12893         /* The ABI for supporting robust futexes has userspace pass
12894          * the kernel a pointer to a linked list which is updated by
12895          * userspace after the syscall; the list is walked by the kernel
12896          * when the thread exits. Since the linked list in QEMU guest
12897          * memory isn't a valid linked list for the host and we have
12898          * no way to reliably intercept the thread-death event, we can't
12899          * support these. Silently return ENOSYS so that guest userspace
12900          * falls back to a non-robust futex implementation (which should
12901          * be OK except in the corner case of the guest crashing while
12902          * holding a mutex that is shared with another process via
12903          * shared memory).
12904          */
12905         return -TARGET_ENOSYS;
12906 #endif
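    /*
     * Illustrative sketch of what the guest observes here (assumption:
     * its libc issues set_robust_list() once per thread and tolerates
     * failure):
     *
     *     if (syscall(__NR_set_robust_list, head, sizeof(*head)) != 0) {
     *         // errno == ENOSYS: robust-futex cleanup on unexpected
     *         // thread death is unavailable; ordinary mutexes still work.
     *     }
     */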
12907 
12908 #if defined(TARGET_NR_utimensat)
12909     case TARGET_NR_utimensat:
12910         {
12911             struct timespec *tsp, ts[2];
12912             if (!arg3) {
12913                 tsp = NULL;
12914             } else {
12915                 if (target_to_host_timespec(ts, arg3)) {
12916                     return -TARGET_EFAULT;
12917                 }
12918                 if (target_to_host_timespec(ts + 1, arg3 +
12919                                             sizeof(struct target_timespec))) {
12920                     return -TARGET_EFAULT;
12921                 }
12922                 tsp = ts;
12923             }
12924             if (!arg2)
12925                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12926             else {
12927                 if (!(p = lock_user_string(arg2))) {
12928                     return -TARGET_EFAULT;
12929                 }
12930                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12931                 unlock_user(p, arg2, 0);
12932             }
12933         }
12934         return ret;
12935 #endif
12936 #ifdef TARGET_NR_utimensat_time64
12937     case TARGET_NR_utimensat_time64:
12938         {
12939             struct timespec *tsp, ts[2];
12940             if (!arg3) {
12941                 tsp = NULL;
12942             } else {
12943                 if (target_to_host_timespec64(ts, arg3)) {
12944                     return -TARGET_EFAULT;
12945                 }
12946                 if (target_to_host_timespec64(ts + 1, arg3 +
12947                                      sizeof(struct target__kernel_timespec))) {
12948                     return -TARGET_EFAULT;
12949                 }
12950                 tsp = ts;
12951             }
12952             if (!arg2)
12953                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12954             else {
12955                 p = lock_user_string(arg2);
12956                 if (!p) {
12957                     return -TARGET_EFAULT;
12958                 }
12959                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12960                 unlock_user(p, arg2, 0);
12961             }
12962         }
12963         return ret;
12964 #endif
12965 #ifdef TARGET_NR_futex
12966     case TARGET_NR_futex:
12967         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12968 #endif
12969 #ifdef TARGET_NR_futex_time64
12970     case TARGET_NR_futex_time64:
12971         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12972 #endif
12973 #ifdef CONFIG_INOTIFY
12974 #if defined(TARGET_NR_inotify_init)
12975     case TARGET_NR_inotify_init:
12976         ret = get_errno(inotify_init());
12977         if (ret >= 0) {
12978             fd_trans_register(ret, &target_inotify_trans);
12979         }
12980         return ret;
12981 #endif
12982 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12983     case TARGET_NR_inotify_init1:
12984         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12985                                           fcntl_flags_tbl)));
12986         if (ret >= 0) {
12987             fd_trans_register(ret, &target_inotify_trans);
12988         }
12989         return ret;
12990 #endif
12991 #if defined(TARGET_NR_inotify_add_watch)
12992     case TARGET_NR_inotify_add_watch:
12993         p = lock_user_string(arg2);
12994         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12995         unlock_user(p, arg2, 0);
12996         return ret;
12997 #endif
12998 #if defined(TARGET_NR_inotify_rm_watch)
12999     case TARGET_NR_inotify_rm_watch:
13000         return get_errno(inotify_rm_watch(arg1, arg2));
13001 #endif
13002 #endif
13003 
13004 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13005     case TARGET_NR_mq_open:
13006         {
13007             struct mq_attr posix_mq_attr;
13008             struct mq_attr *pposix_mq_attr;
13009             int host_flags;
13010 
13011             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13012             pposix_mq_attr = NULL;
13013             if (arg4) {
13014                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13015                     return -TARGET_EFAULT;
13016                 }
13017                 pposix_mq_attr = &posix_mq_attr;
13018             }
13019             p = lock_user_string(arg1 - 1);
13020             if (!p) {
13021                 return -TARGET_EFAULT;
13022             }
13023             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13024             unlock_user(p, arg1, 0);
13025         }
13026         return ret;
13027 
13028     case TARGET_NR_mq_unlink:
13029         p = lock_user_string(arg1 - 1);
13030         if (!p) {
13031             return -TARGET_EFAULT;
13032         }
13033         ret = get_errno(mq_unlink(p));
13034         unlock_user(p, arg1, 0);
13035         return ret;
13036 
13037 #ifdef TARGET_NR_mq_timedsend
13038     case TARGET_NR_mq_timedsend:
13039         {
13040             struct timespec ts;
13041 
13042             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13043             if (arg5 != 0) {
13044                 if (target_to_host_timespec(&ts, arg5)) {
13045                     return -TARGET_EFAULT;
13046                 }
13047                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13048                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13049                     return -TARGET_EFAULT;
13050                 }
13051             } else {
13052                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13053             }
13054             unlock_user(p, arg2, arg3);
13055         }
13056         return ret;
13057 #endif
13058 #ifdef TARGET_NR_mq_timedsend_time64
13059     case TARGET_NR_mq_timedsend_time64:
13060         {
13061             struct timespec ts;
13062 
13063             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13064             if (arg5 != 0) {
13065                 if (target_to_host_timespec64(&ts, arg5)) {
13066                     return -TARGET_EFAULT;
13067                 }
13068                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13069                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13070                     return -TARGET_EFAULT;
13071                 }
13072             } else {
13073                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13074             }
13075             unlock_user(p, arg2, arg3);
13076         }
13077         return ret;
13078 #endif
13079 
13080 #ifdef TARGET_NR_mq_timedreceive
13081     case TARGET_NR_mq_timedreceive:
13082         {
13083             struct timespec ts;
13084             unsigned int prio;
13085 
13086             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13087             if (arg5 != 0) {
13088                 if (target_to_host_timespec(&ts, arg5)) {
13089                     return -TARGET_EFAULT;
13090                 }
13091                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13092                                                      &prio, &ts));
13093                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13094                     return -TARGET_EFAULT;
13095                 }
13096             } else {
13097                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13098                                                      &prio, NULL));
13099             }
13100             unlock_user(p, arg2, arg3);
13101             if (arg4 != 0)
13102                 put_user_u32(prio, arg4);
13103         }
13104         return ret;
13105 #endif
13106 #ifdef TARGET_NR_mq_timedreceive_time64
13107     case TARGET_NR_mq_timedreceive_time64:
13108         {
13109             struct timespec ts;
13110             unsigned int prio;
13111 
13112             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13113             if (arg5 != 0) {
13114                 if (target_to_host_timespec64(&ts, arg5)) {
13115                     return -TARGET_EFAULT;
13116                 }
13117                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13118                                                      &prio, &ts));
13119                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13120                     return -TARGET_EFAULT;
13121                 }
13122             } else {
13123                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13124                                                      &prio, NULL));
13125             }
13126             unlock_user(p, arg2, arg3);
13127             if (arg4 != 0) {
13128                 put_user_u32(prio, arg4);
13129             }
13130         }
13131         return ret;
13132 #endif
13133 
13134     /* Not implemented for now... */
13135 /*     case TARGET_NR_mq_notify: */
13136 /*         break; */
13137 
13138     case TARGET_NR_mq_getsetattr:
13139         {
13140             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13141             ret = 0;
13142             if (arg2 != 0) {
13143                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13144                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13145                                            &posix_mq_attr_out));
13146             } else if (arg3 != 0) {
13147                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13148             }
13149             if (ret == 0 && arg3 != 0) {
13150                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13151             }
13152         }
13153         return ret;
13154 #endif
13155 
13156 #ifdef CONFIG_SPLICE
13157 #ifdef TARGET_NR_tee
13158     case TARGET_NR_tee:
13159         {
13160             ret = get_errno(tee(arg1, arg2, arg3, arg4));
13161         }
13162         return ret;
13163 #endif
13164 #ifdef TARGET_NR_splice
13165     case TARGET_NR_splice:
13166         {
13167             loff_t loff_in, loff_out;
13168             loff_t *ploff_in = NULL, *ploff_out = NULL;
13169             if (arg2) {
13170                 if (get_user_u64(loff_in, arg2)) {
13171                     return -TARGET_EFAULT;
13172                 }
13173                 ploff_in = &loff_in;
13174             }
13175             if (arg4) {
13176                 if (get_user_u64(loff_out, arg4)) {
13177                     return -TARGET_EFAULT;
13178                 }
13179                 ploff_out = &loff_out;
13180             }
13181             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13182             if (arg2) {
13183                 if (put_user_u64(loff_in, arg2)) {
13184                     return -TARGET_EFAULT;
13185                 }
13186             }
13187             if (arg4) {
13188                 if (put_user_u64(loff_out, arg4)) {
13189                     return -TARGET_EFAULT;
13190                 }
13191             }
13192         }
13193         return ret;
13194 #endif
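    /*
     * Illustrative guest-side call shape for the splice case above
     * (standard splice(2) semantics): when an offset pointer is supplied
     * the kernel advances it, so the updated value must be copied back to
     * guest memory, which is what the put_user_u64() calls do:
     *
     *     loff_t off = 0;
     *     splice(file_fd, &off, pipe_fd, NULL, 65536, 0);
     *     // off now reflects how much was consumed from file_fd
     */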
13195 #ifdef TARGET_NR_vmsplice
13196     case TARGET_NR_vmsplice:
13197         {
13198             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13199             if (vec != NULL) {
13200                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13201                 unlock_iovec(vec, arg2, arg3, 0);
13202             } else {
13203                 ret = -host_to_target_errno(errno);
13204             }
13205         }
13206         return ret;
13207 #endif
13208 #endif /* CONFIG_SPLICE */
13209 #ifdef CONFIG_EVENTFD
13210 #if defined(TARGET_NR_eventfd)
13211     case TARGET_NR_eventfd:
13212         ret = get_errno(eventfd(arg1, 0));
13213         if (ret >= 0) {
13214             fd_trans_register(ret, &target_eventfd_trans);
13215         }
13216         return ret;
13217 #endif
13218 #if defined(TARGET_NR_eventfd2)
13219     case TARGET_NR_eventfd2:
13220     {
13221         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13222         if (arg2 & TARGET_O_NONBLOCK) {
13223             host_flags |= O_NONBLOCK;
13224         }
13225         if (arg2 & TARGET_O_CLOEXEC) {
13226             host_flags |= O_CLOEXEC;
13227         }
13228         ret = get_errno(eventfd(arg1, host_flags));
13229         if (ret >= 0) {
13230             fd_trans_register(ret, &target_eventfd_trans);
13231         }
13232         return ret;
13233     }
13234 #endif
13235 #endif /* CONFIG_EVENTFD  */
13236 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13237     case TARGET_NR_fallocate:
13238 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13239         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13240                                   target_offset64(arg5, arg6)));
13241 #else
13242         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13243 #endif
13244         return ret;
13245 #endif
13246 #if defined(CONFIG_SYNC_FILE_RANGE)
13247 #if defined(TARGET_NR_sync_file_range)
13248     case TARGET_NR_sync_file_range:
13249 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13250 #if defined(TARGET_MIPS)
13251         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13252                                         target_offset64(arg5, arg6), arg7));
13253 #else
13254         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13255                                         target_offset64(arg4, arg5), arg6));
13256 #endif /* !TARGET_MIPS */
13257 #else
13258         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13259 #endif
13260         return ret;
13261 #endif
13262 #if defined(TARGET_NR_sync_file_range2) || \
13263     defined(TARGET_NR_arm_sync_file_range)
13264 #if defined(TARGET_NR_sync_file_range2)
13265     case TARGET_NR_sync_file_range2:
13266 #endif
13267 #if defined(TARGET_NR_arm_sync_file_range)
13268     case TARGET_NR_arm_sync_file_range:
13269 #endif
13270         /* This is like sync_file_range but the arguments are reordered */
13271 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13272         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13273                                         target_offset64(arg5, arg6), arg2));
13274 #else
13275         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13276 #endif
13277         return ret;
13278 #endif
13279 #endif
13280 #if defined(TARGET_NR_signalfd4)
13281     case TARGET_NR_signalfd4:
13282         return do_signalfd4(arg1, arg2, arg4);
13283 #endif
13284 #if defined(TARGET_NR_signalfd)
13285     case TARGET_NR_signalfd:
13286         return do_signalfd4(arg1, arg2, 0);
13287 #endif
13288 #if defined(CONFIG_EPOLL)
13289 #if defined(TARGET_NR_epoll_create)
13290     case TARGET_NR_epoll_create:
13291         return get_errno(epoll_create(arg1));
13292 #endif
13293 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13294     case TARGET_NR_epoll_create1:
13295         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13296 #endif
13297 #if defined(TARGET_NR_epoll_ctl)
13298     case TARGET_NR_epoll_ctl:
13299     {
13300         struct epoll_event ep;
13301         struct epoll_event *epp = 0;
13302         if (arg4) {
13303             if (arg2 != EPOLL_CTL_DEL) {
13304                 struct target_epoll_event *target_ep;
13305                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13306                     return -TARGET_EFAULT;
13307                 }
13308                 ep.events = tswap32(target_ep->events);
13309                 /*
13310                  * The epoll_data_t union is just opaque data to the kernel,
13311                  * so we transfer all 64 bits across and need not worry what
13312                  * actual data type it is.
13313                  */
13314                 ep.data.u64 = tswap64(target_ep->data.u64);
13315                 unlock_user_struct(target_ep, arg4, 0);
13316             }
13317             /*
13318              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13319              * non-null pointer, even though this argument is ignored.
13320              *
13321              */
13322             epp = &ep;
13323         }
13324         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13325     }
13326 #endif
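    /*
     * Illustrative note on the 64-bit byteswap of data above (this mirrors
     * the kernel's epoll_data_t layout): the guest may have stored a
     * pointer, an fd, a u32 or a u64, but all of them alias the same
     * eight bytes,
     *
     *     union example_epoll_data {
     *         void     *ptr;
     *         int       fd;
     *         uint32_t  u32;
     *         uint64_t  u64;
     *     };
     *
     * so swapping data.u64 on the way in and out returns every variant to
     * the guest unchanged.
     */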
13327 
13328 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13329 #if defined(TARGET_NR_epoll_wait)
13330     case TARGET_NR_epoll_wait:
13331 #endif
13332 #if defined(TARGET_NR_epoll_pwait)
13333     case TARGET_NR_epoll_pwait:
13334 #endif
13335     {
13336         struct target_epoll_event *target_ep;
13337         struct epoll_event *ep;
13338         int epfd = arg1;
13339         int maxevents = arg3;
13340         int timeout = arg4;
13341 
13342         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13343             return -TARGET_EINVAL;
13344         }
13345 
13346         target_ep = lock_user(VERIFY_WRITE, arg2,
13347                               maxevents * sizeof(struct target_epoll_event), 1);
13348         if (!target_ep) {
13349             return -TARGET_EFAULT;
13350         }
13351 
13352         ep = g_try_new(struct epoll_event, maxevents);
13353         if (!ep) {
13354             unlock_user(target_ep, arg2, 0);
13355             return -TARGET_ENOMEM;
13356         }
13357 
13358         switch (num) {
13359 #if defined(TARGET_NR_epoll_pwait)
13360         case TARGET_NR_epoll_pwait:
13361         {
13362             sigset_t *set = NULL;
13363 
13364             if (arg5) {
13365                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13366                 if (ret != 0) {
13367                     break;
13368                 }
13369             }
13370 
13371             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13372                                              set, SIGSET_T_SIZE));
13373 
13374             if (set) {
13375                 finish_sigsuspend_mask(ret);
13376             }
13377             break;
13378         }
13379 #endif
13380 #if defined(TARGET_NR_epoll_wait)
13381         case TARGET_NR_epoll_wait:
13382             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13383                                              NULL, 0));
13384             break;
13385 #endif
13386         default:
13387             ret = -TARGET_ENOSYS;
13388         }
13389         if (!is_error(ret)) {
13390             int i;
13391             for (i = 0; i < ret; i++) {
13392                 target_ep[i].events = tswap32(ep[i].events);
13393                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13394             }
13395             unlock_user(target_ep, arg2,
13396                         ret * sizeof(struct target_epoll_event));
13397         } else {
13398             unlock_user(target_ep, arg2, 0);
13399         }
13400         g_free(ep);
13401         return ret;
13402     }
13403 #endif
13404 #endif
13405 #ifdef TARGET_NR_prlimit64
13406     case TARGET_NR_prlimit64:
13407     {
13408         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13409         struct target_rlimit64 *target_rnew, *target_rold;
13410         struct host_rlimit64 rnew, rold, *rnewp = 0;
13411         int resource = target_to_host_resource(arg2);
13412 
13413         if (arg3 && (resource != RLIMIT_AS &&
13414                      resource != RLIMIT_DATA &&
13415                      resource != RLIMIT_STACK)) {
13416             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13417                 return -TARGET_EFAULT;
13418             }
13419             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13420             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13421             unlock_user_struct(target_rnew, arg3, 0);
13422             rnewp = &rnew;
13423         }
13424 
13425         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13426         if (!is_error(ret) && arg4) {
13427             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13428                 return -TARGET_EFAULT;
13429             }
13430             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13431             __put_user(rold.rlim_max, &target_rold->rlim_max);
13432             unlock_user_struct(target_rold, arg4, 1);
13433         }
13434         return ret;
13435     }
13436 #endif
13437 #ifdef TARGET_NR_gethostname
13438     case TARGET_NR_gethostname:
13439     {
13440         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13441         if (name) {
13442             ret = get_errno(gethostname(name, arg2));
13443             unlock_user(name, arg1, arg2);
13444         } else {
13445             ret = -TARGET_EFAULT;
13446         }
13447         return ret;
13448     }
13449 #endif
13450 #ifdef TARGET_NR_atomic_cmpxchg_32
13451     case TARGET_NR_atomic_cmpxchg_32:
13452     {
13453         /* should use start_exclusive from main.c */
13454         abi_ulong mem_value;
13455         if (get_user_u32(mem_value, arg6)) {
13456             target_siginfo_t info;
13457             info.si_signo = SIGSEGV;
13458             info.si_errno = 0;
13459             info.si_code = TARGET_SEGV_MAPERR;
13460             info._sifields._sigfault._addr = arg6;
13461             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13462             ret = 0xdeadbeef;
13463 
13464         }
13465         if (mem_value == arg2)
13466             put_user_u32(arg1, arg6);
13467         return mem_value;
13468     }
13469 #endif
13470 #ifdef TARGET_NR_atomic_barrier
13471     case TARGET_NR_atomic_barrier:
13472         /* Like the kernel implementation and the
13473            qemu arm barrier, no-op this? */
13474         return 0;
13475 #endif
13476 
13477 #ifdef TARGET_NR_timer_create
13478     case TARGET_NR_timer_create:
13479     {
13480         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13481 
13482         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13483 
13484         int clkid = arg1;
13485         int timer_index = next_free_host_timer();
13486 
13487         if (timer_index < 0) {
13488             ret = -TARGET_EAGAIN;
13489         } else {
13490             timer_t *phtimer = g_posix_timers + timer_index;
13491 
13492             if (arg2) {
13493                 phost_sevp = &host_sevp;
13494                 ret = target_to_host_sigevent(phost_sevp, arg2);
13495                 if (ret != 0) {
13496                     free_host_timer_slot(timer_index);
13497                     return ret;
13498                 }
13499             }
13500 
13501             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13502             if (ret) {
13503                 free_host_timer_slot(timer_index);
13504             } else {
13505                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13506                     timer_delete(*phtimer);
13507                     free_host_timer_slot(timer_index);
13508                     return -TARGET_EFAULT;
13509                 }
13510             }
13511         }
13512         return ret;
13513     }
13514 #endif
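    /*
     * Minimal sketch of the timer-id encoding handed to the guest above
     * (get_timer_id() is defined earlier in this file; this is only the
     * idea, assuming the low bits hold the slot index):
     *
     *     guest timer_t value  =  TIMER_MAGIC | timer_index
     *
     * Decoding rejects values whose magic bits do not match TIMER_MAGIC,
     * masks the magic off to recover the g_posix_timers[] slot, and
     * rejects out-of-range slots, so arbitrary guest values can never
     * index the host timer table.
     */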
13515 
13516 #ifdef TARGET_NR_timer_settime
13517     case TARGET_NR_timer_settime:
13518     {
13519         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13520          * struct itimerspec * old_value */
13521         target_timer_t timerid = get_timer_id(arg1);
13522 
13523         if (timerid < 0) {
13524             ret = timerid;
13525         } else if (arg3 == 0) {
13526             ret = -TARGET_EINVAL;
13527         } else {
13528             timer_t htimer = g_posix_timers[timerid];
13529             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13530 
13531             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13532                 return -TARGET_EFAULT;
13533             }
13534             ret = get_errno(
13535                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13536             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13537                 return -TARGET_EFAULT;
13538             }
13539         }
13540         return ret;
13541     }
13542 #endif
13543 
13544 #ifdef TARGET_NR_timer_settime64
13545     case TARGET_NR_timer_settime64:
13546     {
13547         target_timer_t timerid = get_timer_id(arg1);
13548 
13549         if (timerid < 0) {
13550             ret = timerid;
13551         } else if (arg3 == 0) {
13552             ret = -TARGET_EINVAL;
13553         } else {
13554             timer_t htimer = g_posix_timers[timerid];
13555             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13556 
13557             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13558                 return -TARGET_EFAULT;
13559             }
13560             ret = get_errno(
13561                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13562             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13563                 return -TARGET_EFAULT;
13564             }
13565         }
13566         return ret;
13567     }
13568 #endif
13569 
13570 #ifdef TARGET_NR_timer_gettime
13571     case TARGET_NR_timer_gettime:
13572     {
13573         /* args: timer_t timerid, struct itimerspec *curr_value */
13574         target_timer_t timerid = get_timer_id(arg1);
13575 
13576         if (timerid < 0) {
13577             ret = timerid;
13578         } else if (!arg2) {
13579             ret = -TARGET_EFAULT;
13580         } else {
13581             timer_t htimer = g_posix_timers[timerid];
13582             struct itimerspec hspec;
13583             ret = get_errno(timer_gettime(htimer, &hspec));
13584 
13585             if (host_to_target_itimerspec(arg2, &hspec)) {
13586                 ret = -TARGET_EFAULT;
13587             }
13588         }
13589         return ret;
13590     }
13591 #endif
13592 
13593 #ifdef TARGET_NR_timer_gettime64
13594     case TARGET_NR_timer_gettime64:
13595     {
13596         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13597         target_timer_t timerid = get_timer_id(arg1);
13598 
13599         if (timerid < 0) {
13600             ret = timerid;
13601         } else if (!arg2) {
13602             ret = -TARGET_EFAULT;
13603         } else {
13604             timer_t htimer = g_posix_timers[timerid];
13605             struct itimerspec hspec;
13606             ret = get_errno(timer_gettime(htimer, &hspec));
13607 
13608             if (host_to_target_itimerspec64(arg2, &hspec)) {
13609                 ret = -TARGET_EFAULT;
13610             }
13611         }
13612         return ret;
13613     }
13614 #endif
13615 
13616 #ifdef TARGET_NR_timer_getoverrun
13617     case TARGET_NR_timer_getoverrun:
13618     {
13619         /* args: timer_t timerid */
13620         target_timer_t timerid = get_timer_id(arg1);
13621 
13622         if (timerid < 0) {
13623             ret = timerid;
13624         } else {
13625             timer_t htimer = g_posix_timers[timerid];
13626             ret = get_errno(timer_getoverrun(htimer));
13627         }
13628         return ret;
13629     }
13630 #endif
13631 
13632 #ifdef TARGET_NR_timer_delete
13633     case TARGET_NR_timer_delete:
13634     {
13635         /* args: timer_t timerid */
13636         target_timer_t timerid = get_timer_id(arg1);
13637 
13638         if (timerid < 0) {
13639             ret = timerid;
13640         } else {
13641             timer_t htimer = g_posix_timers[timerid];
13642             ret = get_errno(timer_delete(htimer));
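            /* Release the guest timer slot so the id can be reused. */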
13643             free_host_timer_slot(timerid);
13644         }
13645         return ret;
13646     }
13647 #endif
13648 
13649 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13650     case TARGET_NR_timerfd_create:
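        /* Remap the guest's flag bits to host values and, on success,
         * attach the timerfd fd translator so data read from the new
         * descriptor is converted for the guest. */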
13651         ret = get_errno(timerfd_create(arg1,
13652                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13653         if (ret >= 0) {
13654             fd_trans_register(ret, &target_timerfd_trans);
13655         }
13656         return ret;
13657 #endif
13658 
13659 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13660     case TARGET_NR_timerfd_gettime:
13661         {
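            /* args: int fd, struct itimerspec *curr_value */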
13662             struct itimerspec its_curr;
13663 
13664             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13665 
13666             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13667                 return -TARGET_EFAULT;
13668             }
13669         }
13670         return ret;
13671 #endif
13672 
13673 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13674     case TARGET_NR_timerfd_gettime64:
13675         {
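            /* args: int fd, struct itimerspec64 *curr_value */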
13676             struct itimerspec its_curr;
13677 
13678             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13679 
13680             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13681                 return -TARGET_EFAULT;
13682             }
13683         }
13684         return ret;
13685 #endif
13686 
13687 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13688     case TARGET_NR_timerfd_settime:
13689         {
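            /* args: int fd, int flags, const struct itimerspec *new_value,
             *       struct itimerspec *old_value */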
13690             struct itimerspec its_new, its_old, *p_new;
13691 
13692             if (arg3) {
13693                 if (target_to_host_itimerspec(&its_new, arg3)) {
13694                     return -TARGET_EFAULT;
13695                 }
13696                 p_new = &its_new;
13697             } else {
13698                 p_new = NULL;
13699             }
13700 
13701             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13702 
13703             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13704                 return -TARGET_EFAULT;
13705             }
13706         }
13707         return ret;
13708 #endif
13709 
13710 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13711     case TARGET_NR_timerfd_settime64:
13712         {
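            /* args: int fd, int flags, const struct itimerspec64 *new_value,
             *       struct itimerspec64 *old_value */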
13713             struct itimerspec its_new, its_old, *p_new;
13714 
13715             if (arg3) {
13716                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13717                     return -TARGET_EFAULT;
13718                 }
13719                 p_new = &its_new;
13720             } else {
13721                 p_new = NULL;
13722             }
13723 
13724             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13725 
13726             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13727                 return -TARGET_EFAULT;
13728             }
13729         }
13730         return ret;
13731 #endif
13732 
13733 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13734     case TARGET_NR_ioprio_get:
13735         return get_errno(ioprio_get(arg1, arg2));
13736 #endif
13737 
13738 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13739     case TARGET_NR_ioprio_set:
13740         return get_errno(ioprio_set(arg1, arg2, arg3));
13741 #endif
13742 
13743 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13744     case TARGET_NR_setns:
13745         return get_errno(setns(arg1, arg2));
13746 #endif
13747 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13748     case TARGET_NR_unshare:
13749         return get_errno(unshare(arg1));
13750 #endif
13751 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13752     case TARGET_NR_kcmp:
13753         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13754 #endif
13755 #ifdef TARGET_NR_swapcontext
13756     case TARGET_NR_swapcontext:
13757         /* PowerPC specific.  */
13758         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13759 #endif
13760 #ifdef TARGET_NR_memfd_create
13761     case TARGET_NR_memfd_create:
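        /* args: const char *name, unsigned int flags.  The name is copied in
         * from guest memory; fd_trans_unregister() drops any translator left
         * over from an earlier descriptor with the same number. */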
13762         p = lock_user_string(arg1);
13763         if (!p) {
13764             return -TARGET_EFAULT;
13765         }
13766         ret = get_errno(memfd_create(p, arg2));
13767         fd_trans_unregister(ret);
13768         unlock_user(p, arg1, 0);
13769         return ret;
13770 #endif
13771 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13772     case TARGET_NR_membarrier:
13773         return get_errno(membarrier(arg1, arg2));
13774 #endif
13775 
13776 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13777     case TARGET_NR_copy_file_range:
13778         {
13779             loff_t inoff, outoff;
13780             loff_t *pinoff = NULL, *poutoff = NULL;
13781 
13782             if (arg2) {
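            /* off_in and off_out are optional 64-bit offsets in guest
             * memory: copy them in here and write the updated values back
             * on success. */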
13783                 if (get_user_u64(inoff, arg2)) {
13784                     return -TARGET_EFAULT;
13785                 }
13786                 pinoff = &inoff;
13787             }
13788             if (arg4) {
13789                 if (get_user_u64(outoff, arg4)) {
13790                     return -TARGET_EFAULT;
13791                 }
13792                 poutoff = &outoff;
13793             }
13794             /* Do not sign-extend the count parameter. */
13795             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13796                                                  (abi_ulong)arg5, arg6));
13797             if (!is_error(ret) && ret > 0) {
13798                 if (arg2) {
13799                     if (put_user_u64(inoff, arg2)) {
13800                         return -TARGET_EFAULT;
13801                     }
13802                 }
13803                 if (arg4) {
13804                     if (put_user_u64(outoff, arg4)) {
13805                         return -TARGET_EFAULT;
13806                     }
13807                 }
13808             }
13809         }
13810         return ret;
13811 #endif
13812 
13813 #if defined(TARGET_NR_pivot_root)
13814     case TARGET_NR_pivot_root:
13815         {
13816             void *p2;
13817             p = lock_user_string(arg1); /* new_root */
13818             p2 = lock_user_string(arg2); /* put_old */
13819             if (!p || !p2) {
13820                 ret = -TARGET_EFAULT;
13821             } else {
13822                 ret = get_errno(pivot_root(p, p2));
13823             }
13824             unlock_user(p2, arg2, 0);
13825             unlock_user(p, arg1, 0);
13826         }
13827         return ret;
13828 #endif
13829 
13830 #if defined(TARGET_NR_riscv_hwprobe)
13831     case TARGET_NR_riscv_hwprobe:
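        /* RISC-V specific.  */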
13832         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13833 #endif
13834 
13835     default:
13836         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13837         return -TARGET_ENOSYS;
13838     }
13839     return ret;
13840 }
13841 
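/*
 * Entry point called from the per-architecture cpu main loops: notify
 * plugins that a syscall has started, emit -strace logging when enabled,
 * hand the real work to do_syscall1(), then report the result the same way.
 */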
13842 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13843                     abi_long arg2, abi_long arg3, abi_long arg4,
13844                     abi_long arg5, abi_long arg6, abi_long arg7,
13845                     abi_long arg8)
13846 {
13847     CPUState *cpu = env_cpu(cpu_env);
13848     abi_long ret;
13849 
13850 #ifdef DEBUG_ERESTARTSYS
13851     /* Debug-only code for exercising the syscall-restart code paths
13852      * in the per-architecture cpu main loops: restart every syscall
13853      * the guest makes once before letting it through.
13854      */
13855     {
13856         static bool flag;
13857         flag = !flag;
13858         if (flag) {
13859             return -QEMU_ERESTARTSYS;
13860         }
13861     }
13862 #endif
13863 
13864     record_syscall_start(cpu, num, arg1,
13865                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13866 
13867     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13868         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13869     }
13870 
13871     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13872                       arg5, arg6, arg7, arg8);
13873 
13874     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13875         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13876                           arg3, arg4, arg5, arg6);
13877     }
13878 
13879     record_syscall_return(cpu, num, ret);
13880     return ret;
13881 }
13882