xref: /openbmc/qemu/linux-user/syscall.c (revision 6d2d454a)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
260 
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we emulate getdents with getdents if the host has it.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be that used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
537 #endif
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
598 
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  use the flock64 struct rather than unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
797 static abi_ulong target_brk;
798 static abi_ulong target_original_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
808 #define DEBUGF_BRK(message, args...)
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong new_brk)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_alloc_size;
815 
816     /* brk pointers are always untagged */
817 
818     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
819 
820     if (!new_brk) {
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
822         return target_brk;
823     }
824     if (new_brk < target_original_brk) {
825         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
826                    target_brk);
827         return target_brk;
828     }
829 
830     /* If the new brk is less than the highest page reserved to the
831      * target heap allocation, set it and we're almost done...  */
832     if (new_brk <= brk_page) {
833         /* Heap contents are initialized to zero, as for anonymous
834          * mapped pages.  */
835         if (new_brk > target_brk) {
836             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
837         }
838 	target_brk = new_brk;
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
840 	return target_brk;
841     }
842 
843     /* We need to allocate more memory after the brk... Note that
844      * we don't use MAP_FIXED because that will map over the top of
845      * any existing mapping (like the one with the host libc or qemu
846      * itself); instead we treat "mapped but at wrong address" as
847      * a failure and unmap again.
848      */
849     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
850     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
851                                         PROT_READ|PROT_WRITE,
852                                         MAP_ANON|MAP_PRIVATE, 0, 0));
853 
854     if (mapped_addr == brk_page) {
855         /* Heap contents are initialized to zero, as for anonymous
856          * mapped pages.  Technically the new pages are already
857          * initialized to zero since they *are* anonymous mapped
858          * pages, however we have to take care with the contents that
859          * come from the remaining part of the previous page: it may
860          * contains garbage data due to a previous heap usage (grown
861          * then shrunken).  */
862         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
863 
864         target_brk = new_brk;
865         brk_page = HOST_PAGE_ALIGN(target_brk);
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
867             target_brk);
868         return target_brk;
869     } else if (mapped_addr != -1) {
870         /* Mapped but at wrong address, meaning there wasn't actually
871          * enough space for this brk.
872          */
873         target_munmap(mapped_addr, new_alloc_size);
874         mapped_addr = -1;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
876     }
877     else {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457             ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589           struct timespec ts, *pts;
1590 
1591           if (arg3 >= 0) {
1592               /* Convert ms to secs, ns */
1593               ts.tv_sec = arg3 / 1000;
1594               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595               pts = &ts;
1596           } else {
1597               /* -ve poll() timeout means "infinite" */
1598               pts = NULL;
1599           }
1600           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
1646 
1647 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1648                                               abi_ulong target_addr,
1649                                               socklen_t len)
1650 {
1651     struct target_ip_mreqn *target_smreqn;
1652 
1653     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_smreqn)
1655         return -TARGET_EFAULT;
1656     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1657     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1658     if (len == sizeof(struct target_ip_mreqn))
1659         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1660     unlock_user(target_smreqn, target_addr, 0);
1661 
1662     return 0;
1663 }
1664 
1665 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1666                                                abi_ulong target_addr,
1667                                                socklen_t len)
1668 {
1669     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1670     sa_family_t sa_family;
1671     struct target_sockaddr *target_saddr;
1672 
1673     if (fd_trans_target_to_host_addr(fd)) {
1674         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1675     }
1676 
1677     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1678     if (!target_saddr)
1679         return -TARGET_EFAULT;
1680 
1681     sa_family = tswap16(target_saddr->sa_family);
1682 
1683     /* Oops. The caller might send a incomplete sun_path; sun_path
1684      * must be terminated by \0 (see the manual page), but
1685      * unfortunately it is quite common to specify sockaddr_un
1686      * length as "strlen(x->sun_path)" while it should be
1687      * "strlen(...) + 1". We'll fix that here if needed.
1688      * Linux kernel has a similar feature.
1689      */
1690 
1691     if (sa_family == AF_UNIX) {
1692         if (len < unix_maxlen && len > 0) {
1693             char *cp = (char*)target_saddr;
1694 
1695             if ( cp[len-1] && !cp[len] )
1696                 len++;
1697         }
1698         if (len > unix_maxlen)
1699             len = unix_maxlen;
1700     }
1701 
1702     memcpy(addr, target_saddr, len);
1703     addr->sa_family = sa_family;
1704     if (sa_family == AF_NETLINK) {
1705         struct sockaddr_nl *nladdr;
1706 
1707         nladdr = (struct sockaddr_nl *)addr;
1708         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1709         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1710     } else if (sa_family == AF_PACKET) {
1711 	struct target_sockaddr_ll *lladdr;
1712 
1713 	lladdr = (struct target_sockaddr_ll *)addr;
1714 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1715 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1716     }
1717     unlock_user(target_saddr, target_addr, 0);
1718 
1719     return 0;
1720 }
1721 
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723                                                struct sockaddr *addr,
1724                                                socklen_t len)
1725 {
1726     struct target_sockaddr *target_saddr;
1727 
1728     if (len == 0) {
1729         return 0;
1730     }
1731     assert(addr);
1732 
1733     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734     if (!target_saddr)
1735         return -TARGET_EFAULT;
1736     memcpy(target_saddr, addr, len);
1737     if (len >= offsetof(struct target_sockaddr, sa_family) +
1738         sizeof(target_saddr->sa_family)) {
1739         target_saddr->sa_family = tswap16(addr->sa_family);
1740     }
1741     if (addr->sa_family == AF_NETLINK &&
1742         len >= sizeof(struct target_sockaddr_nl)) {
1743         struct target_sockaddr_nl *target_nl =
1744                (struct target_sockaddr_nl *)target_saddr;
1745         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747     } else if (addr->sa_family == AF_PACKET) {
1748         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751     } else if (addr->sa_family == AF_INET6 &&
1752                len >= sizeof(struct target_sockaddr_in6)) {
1753         struct target_sockaddr_in6 *target_in6 =
1754                (struct target_sockaddr_in6 *)target_saddr;
1755         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1756     }
1757     unlock_user(target_saddr, target_addr, len);
1758 
1759     return 0;
1760 }
1761 
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763                                            struct target_msghdr *target_msgh)
1764 {
1765     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766     abi_long msg_controllen;
1767     abi_ulong target_cmsg_addr;
1768     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769     socklen_t space = 0;
1770 
1771     msg_controllen = tswapal(target_msgh->msg_controllen);
1772     if (msg_controllen < sizeof (struct target_cmsghdr))
1773         goto the_end;
1774     target_cmsg_addr = tswapal(target_msgh->msg_control);
1775     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776     target_cmsg_start = target_cmsg;
1777     if (!target_cmsg)
1778         return -TARGET_EFAULT;
1779 
1780     while (cmsg && target_cmsg) {
1781         void *data = CMSG_DATA(cmsg);
1782         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783 
1784         int len = tswapal(target_cmsg->cmsg_len)
1785             - sizeof(struct target_cmsghdr);
1786 
1787         space += CMSG_SPACE(len);
1788         if (space > msgh->msg_controllen) {
1789             space -= CMSG_SPACE(len);
1790             /* This is a QEMU bug, since we allocated the payload
1791              * area ourselves (unlike overflow in host-to-target
1792              * conversion, which is just the guest giving us a buffer
1793              * that's too small). It can't happen for the payload types
1794              * we currently support; if it becomes an issue in future
1795              * we would need to improve our allocation strategy to
1796              * something more intelligent than "twice the size of the
1797              * target buffer we're reading from".
1798              */
1799             qemu_log_mask(LOG_UNIMP,
1800                           ("Unsupported ancillary data %d/%d: "
1801                            "unhandled msg size\n"),
1802                           tswap32(target_cmsg->cmsg_level),
1803                           tswap32(target_cmsg->cmsg_type));
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else if (cmsg->cmsg_level == SOL_ALG) {
1833             uint32_t *dst = (uint32_t *)data;
1834 
1835             memcpy(dst, target_data, len);
1836             /* fix endianess of first 32-bit word */
1837             if (len >= sizeof(uint32_t)) {
1838                 *dst = tswap32(*dst);
1839             }
1840         } else {
1841             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1842                           cmsg->cmsg_level, cmsg->cmsg_type);
1843             memcpy(data, target_data, len);
1844         }
1845 
1846         cmsg = CMSG_NXTHDR(msgh, cmsg);
1847         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1848                                          target_cmsg_start);
1849     }
1850     unlock_user(target_cmsg, target_cmsg_addr, 0);
1851  the_end:
1852     msgh->msg_controllen = space;
1853     return 0;
1854 }
1855 
1856 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1857                                            struct msghdr *msgh)
1858 {
1859     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1860     abi_long msg_controllen;
1861     abi_ulong target_cmsg_addr;
1862     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1863     socklen_t space = 0;
1864 
1865     msg_controllen = tswapal(target_msgh->msg_controllen);
1866     if (msg_controllen < sizeof (struct target_cmsghdr))
1867         goto the_end;
1868     target_cmsg_addr = tswapal(target_msgh->msg_control);
1869     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1870     target_cmsg_start = target_cmsg;
1871     if (!target_cmsg)
1872         return -TARGET_EFAULT;
1873 
1874     while (cmsg && target_cmsg) {
1875         void *data = CMSG_DATA(cmsg);
1876         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1877 
1878         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1879         int tgt_len, tgt_space;
1880 
1881         /* We never copy a half-header but may copy half-data;
1882          * this is Linux's behaviour in put_cmsg(). Note that
1883          * truncation here is a guest problem (which we report
1884          * to the guest via the CTRUNC bit), unlike truncation
1885          * in target_to_host_cmsg, which is a QEMU bug.
1886          */
1887         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1888             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1889             break;
1890         }
1891 
1892         if (cmsg->cmsg_level == SOL_SOCKET) {
1893             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1894         } else {
1895             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1896         }
1897         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1898 
1899         /* Payload types which need a different size of payload on
1900          * the target must adjust tgt_len here.
1901          */
1902         tgt_len = len;
1903         switch (cmsg->cmsg_level) {
1904         case SOL_SOCKET:
1905             switch (cmsg->cmsg_type) {
1906             case SO_TIMESTAMP:
1907                 tgt_len = sizeof(struct target_timeval);
1908                 break;
1909             default:
1910                 break;
1911             }
1912             break;
1913         default:
1914             break;
1915         }
1916 
1917         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1918             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1919             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1920         }
1921 
1922         /* We must now copy-and-convert len bytes of payload
1923          * into tgt_len bytes of destination space. Bear in mind
1924          * that in both source and destination we may be dealing
1925          * with a truncated value!
1926          */
1927         switch (cmsg->cmsg_level) {
1928         case SOL_SOCKET:
1929             switch (cmsg->cmsg_type) {
1930             case SCM_RIGHTS:
1931             {
1932                 int *fd = (int *)data;
1933                 int *target_fd = (int *)target_data;
1934                 int i, numfds = tgt_len / sizeof(int);
1935 
1936                 for (i = 0; i < numfds; i++) {
1937                     __put_user(fd[i], target_fd + i);
1938                 }
1939                 break;
1940             }
1941             case SO_TIMESTAMP:
1942             {
1943                 struct timeval *tv = (struct timeval *)data;
1944                 struct target_timeval *target_tv =
1945                     (struct target_timeval *)target_data;
1946 
1947                 if (len != sizeof(struct timeval) ||
1948                     tgt_len != sizeof(struct target_timeval)) {
1949                     goto unimplemented;
1950                 }
1951 
1952                 /* copy struct timeval to target */
1953                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1954                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1955                 break;
1956             }
1957             case SCM_CREDENTIALS:
1958             {
1959                 struct ucred *cred = (struct ucred *)data;
1960                 struct target_ucred *target_cred =
1961                     (struct target_ucred *)target_data;
1962 
1963                 __put_user(cred->pid, &target_cred->pid);
1964                 __put_user(cred->uid, &target_cred->uid);
1965                 __put_user(cred->gid, &target_cred->gid);
1966                 break;
1967             }
1968             default:
1969                 goto unimplemented;
1970             }
1971             break;
1972 
1973         case SOL_IP:
1974             switch (cmsg->cmsg_type) {
1975             case IP_TTL:
1976             {
1977                 uint32_t *v = (uint32_t *)data;
1978                 uint32_t *t_int = (uint32_t *)target_data;
1979 
1980                 if (len != sizeof(uint32_t) ||
1981                     tgt_len != sizeof(uint32_t)) {
1982                     goto unimplemented;
1983                 }
1984                 __put_user(*v, t_int);
1985                 break;
1986             }
1987             case IP_RECVERR:
1988             {
1989                 struct errhdr_t {
1990                    struct sock_extended_err ee;
1991                    struct sockaddr_in offender;
1992                 };
1993                 struct errhdr_t *errh = (struct errhdr_t *)data;
1994                 struct errhdr_t *target_errh =
1995                     (struct errhdr_t *)target_data;
1996 
1997                 if (len != sizeof(struct errhdr_t) ||
1998                     tgt_len != sizeof(struct errhdr_t)) {
1999                     goto unimplemented;
2000                 }
2001                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2002                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2003                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2004                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2005                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2006                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2007                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2008                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2009                     (void *) &errh->offender, sizeof(errh->offender));
2010                 break;
2011             }
2012             default:
2013                 goto unimplemented;
2014             }
2015             break;
2016 
2017         case SOL_IPV6:
2018             switch (cmsg->cmsg_type) {
2019             case IPV6_HOPLIMIT:
2020             {
2021                 uint32_t *v = (uint32_t *)data;
2022                 uint32_t *t_int = (uint32_t *)target_data;
2023 
2024                 if (len != sizeof(uint32_t) ||
2025                     tgt_len != sizeof(uint32_t)) {
2026                     goto unimplemented;
2027                 }
2028                 __put_user(*v, t_int);
2029                 break;
2030             }
2031             case IPV6_RECVERR:
2032             {
2033                 struct errhdr6_t {
2034                    struct sock_extended_err ee;
2035                    struct sockaddr_in6 offender;
2036                 };
2037                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2038                 struct errhdr6_t *target_errh =
2039                     (struct errhdr6_t *)target_data;
2040 
2041                 if (len != sizeof(struct errhdr6_t) ||
2042                     tgt_len != sizeof(struct errhdr6_t)) {
2043                     goto unimplemented;
2044                 }
2045                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2046                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2047                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2048                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2049                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2050                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2051                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2052                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2053                     (void *) &errh->offender, sizeof(errh->offender));
2054                 break;
2055             }
2056             default:
2057                 goto unimplemented;
2058             }
2059             break;
2060 
2061         default:
2062         unimplemented:
2063             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2064                           cmsg->cmsg_level, cmsg->cmsg_type);
2065             memcpy(target_data, data, MIN(len, tgt_len));
2066             if (tgt_len > len) {
2067                 memset(target_data + len, 0, tgt_len - len);
2068             }
2069         }
2070 
2071         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2072         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2073         if (msg_controllen < tgt_space) {
2074             tgt_space = msg_controllen;
2075         }
2076         msg_controllen -= tgt_space;
2077         space += tgt_space;
2078         cmsg = CMSG_NXTHDR(msgh, cmsg);
2079         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2080                                          target_cmsg_start);
2081     }
2082     unlock_user(target_cmsg, target_cmsg_addr, space);
2083  the_end:
2084     target_msgh->msg_controllen = tswapal(space);
2085     return 0;
2086 }
2087 
2088 /* do_setsockopt() Must return target values and target errnos. */
2089 static abi_long do_setsockopt(int sockfd, int level, int optname,
2090                               abi_ulong optval_addr, socklen_t optlen)
2091 {
2092     abi_long ret;
2093     int val;
2094     struct ip_mreqn *ip_mreq;
2095     struct ip_mreq_source *ip_mreq_source;
2096 
2097     switch(level) {
2098     case SOL_TCP:
2099     case SOL_UDP:
2100         /* TCP and UDP options all take an 'int' value.  */
2101         if (optlen < sizeof(uint32_t))
2102             return -TARGET_EINVAL;
2103 
2104         if (get_user_u32(val, optval_addr))
2105             return -TARGET_EFAULT;
2106         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2107         break;
2108     case SOL_IP:
2109         switch(optname) {
2110         case IP_TOS:
2111         case IP_TTL:
2112         case IP_HDRINCL:
2113         case IP_ROUTER_ALERT:
2114         case IP_RECVOPTS:
2115         case IP_RETOPTS:
2116         case IP_PKTINFO:
2117         case IP_MTU_DISCOVER:
2118         case IP_RECVERR:
2119         case IP_RECVTTL:
2120         case IP_RECVTOS:
2121 #ifdef IP_FREEBIND
2122         case IP_FREEBIND:
2123 #endif
2124         case IP_MULTICAST_TTL:
2125         case IP_MULTICAST_LOOP:
2126             val = 0;
2127             if (optlen >= sizeof(uint32_t)) {
2128                 if (get_user_u32(val, optval_addr))
2129                     return -TARGET_EFAULT;
2130             } else if (optlen >= 1) {
2131                 if (get_user_u8(val, optval_addr))
2132                     return -TARGET_EFAULT;
2133             }
2134             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2135             break;
2136         case IP_ADD_MEMBERSHIP:
2137         case IP_DROP_MEMBERSHIP:
2138             if (optlen < sizeof (struct target_ip_mreq) ||
2139                 optlen > sizeof (struct target_ip_mreqn))
2140                 return -TARGET_EINVAL;
2141 
2142             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2143             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2144             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2145             break;
2146 
2147         case IP_BLOCK_SOURCE:
2148         case IP_UNBLOCK_SOURCE:
2149         case IP_ADD_SOURCE_MEMBERSHIP:
2150         case IP_DROP_SOURCE_MEMBERSHIP:
2151             if (optlen != sizeof (struct target_ip_mreq_source))
2152                 return -TARGET_EINVAL;
2153 
2154             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2155             if (!ip_mreq_source) {
2156                 return -TARGET_EFAULT;
2157             }
2158             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2159             unlock_user (ip_mreq_source, optval_addr, 0);
2160             break;
2161 
2162         default:
2163             goto unimplemented;
2164         }
2165         break;
2166     case SOL_IPV6:
2167         switch (optname) {
2168         case IPV6_MTU_DISCOVER:
2169         case IPV6_MTU:
2170         case IPV6_V6ONLY:
2171         case IPV6_RECVPKTINFO:
2172         case IPV6_UNICAST_HOPS:
2173         case IPV6_MULTICAST_HOPS:
2174         case IPV6_MULTICAST_LOOP:
2175         case IPV6_RECVERR:
2176         case IPV6_RECVHOPLIMIT:
2177         case IPV6_2292HOPLIMIT:
2178         case IPV6_CHECKSUM:
2179         case IPV6_ADDRFORM:
2180         case IPV6_2292PKTINFO:
2181         case IPV6_RECVTCLASS:
2182         case IPV6_RECVRTHDR:
2183         case IPV6_2292RTHDR:
2184         case IPV6_RECVHOPOPTS:
2185         case IPV6_2292HOPOPTS:
2186         case IPV6_RECVDSTOPTS:
2187         case IPV6_2292DSTOPTS:
2188         case IPV6_TCLASS:
2189         case IPV6_ADDR_PREFERENCES:
2190 #ifdef IPV6_RECVPATHMTU
2191         case IPV6_RECVPATHMTU:
2192 #endif
2193 #ifdef IPV6_TRANSPARENT
2194         case IPV6_TRANSPARENT:
2195 #endif
2196 #ifdef IPV6_FREEBIND
2197         case IPV6_FREEBIND:
2198 #endif
2199 #ifdef IPV6_RECVORIGDSTADDR
2200         case IPV6_RECVORIGDSTADDR:
2201 #endif
2202             val = 0;
2203             if (optlen < sizeof(uint32_t)) {
2204                 return -TARGET_EINVAL;
2205             }
2206             if (get_user_u32(val, optval_addr)) {
2207                 return -TARGET_EFAULT;
2208             }
2209             ret = get_errno(setsockopt(sockfd, level, optname,
2210                                        &val, sizeof(val)));
2211             break;
2212         case IPV6_PKTINFO:
2213         {
2214             struct in6_pktinfo pki;
2215 
2216             if (optlen < sizeof(pki)) {
2217                 return -TARGET_EINVAL;
2218             }
2219 
2220             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2221                 return -TARGET_EFAULT;
2222             }
2223 
2224             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2225 
2226             ret = get_errno(setsockopt(sockfd, level, optname,
2227                                        &pki, sizeof(pki)));
2228             break;
2229         }
2230         case IPV6_ADD_MEMBERSHIP:
2231         case IPV6_DROP_MEMBERSHIP:
2232         {
2233             struct ipv6_mreq ipv6mreq;
2234 
2235             if (optlen < sizeof(ipv6mreq)) {
2236                 return -TARGET_EINVAL;
2237             }
2238 
2239             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2240                 return -TARGET_EFAULT;
2241             }
2242 
2243             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2244 
2245             ret = get_errno(setsockopt(sockfd, level, optname,
2246                                        &ipv6mreq, sizeof(ipv6mreq)));
2247             break;
2248         }
2249         default:
2250             goto unimplemented;
2251         }
2252         break;
2253     case SOL_ICMPV6:
2254         switch (optname) {
2255         case ICMPV6_FILTER:
2256         {
2257             struct icmp6_filter icmp6f;
2258 
2259             if (optlen > sizeof(icmp6f)) {
2260                 optlen = sizeof(icmp6f);
2261             }
2262 
2263             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2264                 return -TARGET_EFAULT;
2265             }
2266 
2267             for (val = 0; val < 8; val++) {
2268                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2269             }
2270 
2271             ret = get_errno(setsockopt(sockfd, level, optname,
2272                                        &icmp6f, optlen));
2273             break;
2274         }
2275         default:
2276             goto unimplemented;
2277         }
2278         break;
2279     case SOL_RAW:
2280         switch (optname) {
2281         case ICMP_FILTER:
2282         case IPV6_CHECKSUM:
2283             /* those take an u32 value */
2284             if (optlen < sizeof(uint32_t)) {
2285                 return -TARGET_EINVAL;
2286             }
2287 
2288             if (get_user_u32(val, optval_addr)) {
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        &val, sizeof(val)));
2293             break;
2294 
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2300     case SOL_ALG:
2301         switch (optname) {
2302         case ALG_SET_KEY:
2303         {
2304             char *alg_key = g_malloc(optlen);
2305 
2306             if (!alg_key) {
2307                 return -TARGET_ENOMEM;
2308             }
2309             if (copy_from_user(alg_key, optval_addr, optlen)) {
2310                 g_free(alg_key);
2311                 return -TARGET_EFAULT;
2312             }
2313             ret = get_errno(setsockopt(sockfd, level, optname,
2314                                        alg_key, optlen));
2315             g_free(alg_key);
2316             break;
2317         }
2318         case ALG_SET_AEAD_AUTHSIZE:
2319         {
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        NULL, optlen));
2322             break;
2323         }
2324         default:
2325             goto unimplemented;
2326         }
2327         break;
2328 #endif
2329     case TARGET_SOL_SOCKET:
2330         switch (optname) {
2331         case TARGET_SO_RCVTIMEO:
2332         {
2333                 struct timeval tv;
2334 
2335                 optname = SO_RCVTIMEO;
2336 
2337 set_timeout:
2338                 if (optlen != sizeof(struct target_timeval)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341 
2342                 if (copy_from_user_timeval(&tv, optval_addr)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345 
2346                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2347                                 &tv, sizeof(tv)));
2348                 return ret;
2349         }
2350         case TARGET_SO_SNDTIMEO:
2351                 optname = SO_SNDTIMEO;
2352                 goto set_timeout;
2353         case TARGET_SO_ATTACH_FILTER:
2354         {
2355                 struct target_sock_fprog *tfprog;
2356                 struct target_sock_filter *tfilter;
2357                 struct sock_fprog fprog;
2358                 struct sock_filter *filter;
2359                 int i;
2360 
2361                 if (optlen != sizeof(*tfprog)) {
2362                     return -TARGET_EINVAL;
2363                 }
2364                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2365                     return -TARGET_EFAULT;
2366                 }
2367                 if (!lock_user_struct(VERIFY_READ, tfilter,
2368                                       tswapal(tfprog->filter), 0)) {
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_EFAULT;
2371                 }
2372 
2373                 fprog.len = tswap16(tfprog->len);
2374                 filter = g_try_new(struct sock_filter, fprog.len);
2375                 if (filter == NULL) {
2376                     unlock_user_struct(tfilter, tfprog->filter, 1);
2377                     unlock_user_struct(tfprog, optval_addr, 1);
2378                     return -TARGET_ENOMEM;
2379                 }
2380                 for (i = 0; i < fprog.len; i++) {
2381                     filter[i].code = tswap16(tfilter[i].code);
2382                     filter[i].jt = tfilter[i].jt;
2383                     filter[i].jf = tfilter[i].jf;
2384                     filter[i].k = tswap32(tfilter[i].k);
2385                 }
2386                 fprog.filter = filter;
2387 
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2389                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2390                 g_free(filter);
2391 
2392                 unlock_user_struct(tfilter, tfprog->filter, 1);
2393                 unlock_user_struct(tfprog, optval_addr, 1);
2394                 return ret;
2395         }
2396 	case TARGET_SO_BINDTODEVICE:
2397 	{
2398 		char *dev_ifname, *addr_ifname;
2399 
2400 		if (optlen > IFNAMSIZ - 1) {
2401 		    optlen = IFNAMSIZ - 1;
2402 		}
2403 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2404 		if (!dev_ifname) {
2405 		    return -TARGET_EFAULT;
2406 		}
2407 		optname = SO_BINDTODEVICE;
2408 		addr_ifname = alloca(IFNAMSIZ);
2409 		memcpy(addr_ifname, dev_ifname, optlen);
2410 		addr_ifname[optlen] = 0;
2411 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2412                                            addr_ifname, optlen));
2413 		unlock_user (dev_ifname, optval_addr, 0);
2414 		return ret;
2415 	}
2416         case TARGET_SO_LINGER:
2417         {
2418                 struct linger lg;
2419                 struct target_linger *tlg;
2420 
2421                 if (optlen != sizeof(struct target_linger)) {
2422                     return -TARGET_EINVAL;
2423                 }
2424                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2425                     return -TARGET_EFAULT;
2426                 }
2427                 __get_user(lg.l_onoff, &tlg->l_onoff);
2428                 __get_user(lg.l_linger, &tlg->l_linger);
2429                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2430                                 &lg, sizeof(lg)));
2431                 unlock_user_struct(tlg, optval_addr, 0);
2432                 return ret;
2433         }
2434             /* Options with 'int' argument.  */
2435         case TARGET_SO_DEBUG:
2436 		optname = SO_DEBUG;
2437 		break;
2438         case TARGET_SO_REUSEADDR:
2439 		optname = SO_REUSEADDR;
2440 		break;
2441 #ifdef SO_REUSEPORT
2442         case TARGET_SO_REUSEPORT:
2443                 optname = SO_REUSEPORT;
2444                 break;
2445 #endif
2446         case TARGET_SO_TYPE:
2447 		optname = SO_TYPE;
2448 		break;
2449         case TARGET_SO_ERROR:
2450 		optname = SO_ERROR;
2451 		break;
2452         case TARGET_SO_DONTROUTE:
2453 		optname = SO_DONTROUTE;
2454 		break;
2455         case TARGET_SO_BROADCAST:
2456 		optname = SO_BROADCAST;
2457 		break;
2458         case TARGET_SO_SNDBUF:
2459 		optname = SO_SNDBUF;
2460 		break;
2461         case TARGET_SO_SNDBUFFORCE:
2462                 optname = SO_SNDBUFFORCE;
2463                 break;
2464         case TARGET_SO_RCVBUF:
2465 		optname = SO_RCVBUF;
2466 		break;
2467         case TARGET_SO_RCVBUFFORCE:
2468                 optname = SO_RCVBUFFORCE;
2469                 break;
2470         case TARGET_SO_KEEPALIVE:
2471 		optname = SO_KEEPALIVE;
2472 		break;
2473         case TARGET_SO_OOBINLINE:
2474 		optname = SO_OOBINLINE;
2475 		break;
2476         case TARGET_SO_NO_CHECK:
2477 		optname = SO_NO_CHECK;
2478 		break;
2479         case TARGET_SO_PRIORITY:
2480 		optname = SO_PRIORITY;
2481 		break;
2482 #ifdef SO_BSDCOMPAT
2483         case TARGET_SO_BSDCOMPAT:
2484 		optname = SO_BSDCOMPAT;
2485 		break;
2486 #endif
2487         case TARGET_SO_PASSCRED:
2488 		optname = SO_PASSCRED;
2489 		break;
2490         case TARGET_SO_PASSSEC:
2491                 optname = SO_PASSSEC;
2492                 break;
2493         case TARGET_SO_TIMESTAMP:
2494 		optname = SO_TIMESTAMP;
2495 		break;
2496         case TARGET_SO_RCVLOWAT:
2497 		optname = SO_RCVLOWAT;
2498 		break;
2499         default:
2500             goto unimplemented;
2501         }
2502 	if (optlen < sizeof(uint32_t))
2503             return -TARGET_EINVAL;
2504 
2505 	if (get_user_u32(val, optval_addr))
2506             return -TARGET_EFAULT;
2507 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2508         break;
2509 #ifdef SOL_NETLINK
2510     case SOL_NETLINK:
2511         switch (optname) {
2512         case NETLINK_PKTINFO:
2513         case NETLINK_ADD_MEMBERSHIP:
2514         case NETLINK_DROP_MEMBERSHIP:
2515         case NETLINK_BROADCAST_ERROR:
2516         case NETLINK_NO_ENOBUFS:
2517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2518         case NETLINK_LISTEN_ALL_NSID:
2519         case NETLINK_CAP_ACK:
2520 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2521 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2522         case NETLINK_EXT_ACK:
2523 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2525         case NETLINK_GET_STRICT_CHK:
2526 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2527             break;
2528         default:
2529             goto unimplemented;
2530         }
2531         val = 0;
2532         if (optlen < sizeof(uint32_t)) {
2533             return -TARGET_EINVAL;
2534         }
2535         if (get_user_u32(val, optval_addr)) {
2536             return -TARGET_EFAULT;
2537         }
2538         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2539                                    sizeof(val)));
2540         break;
2541 #endif /* SOL_NETLINK */
2542     default:
2543     unimplemented:
2544         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2545                       level, optname);
2546         ret = -TARGET_ENOPROTOOPT;
2547     }
2548     return ret;
2549 }
2550 
2551 /* do_getsockopt() Must return target values and target errnos. */
2552 static abi_long do_getsockopt(int sockfd, int level, int optname,
2553                               abi_ulong optval_addr, abi_ulong optlen)
2554 {
2555     abi_long ret;
2556     int len, val;
2557     socklen_t lv;
2558 
2559     switch(level) {
2560     case TARGET_SOL_SOCKET:
2561         level = SOL_SOCKET;
2562         switch (optname) {
2563         /* These don't just return a single integer */
2564         case TARGET_SO_PEERNAME:
2565             goto unimplemented;
2566         case TARGET_SO_RCVTIMEO: {
2567             struct timeval tv;
2568             socklen_t tvlen;
2569 
2570             optname = SO_RCVTIMEO;
2571 
2572 get_timeout:
2573             if (get_user_u32(len, optlen)) {
2574                 return -TARGET_EFAULT;
2575             }
2576             if (len < 0) {
2577                 return -TARGET_EINVAL;
2578             }
2579 
2580             tvlen = sizeof(tv);
2581             ret = get_errno(getsockopt(sockfd, level, optname,
2582                                        &tv, &tvlen));
2583             if (ret < 0) {
2584                 return ret;
2585             }
2586             if (len > sizeof(struct target_timeval)) {
2587                 len = sizeof(struct target_timeval);
2588             }
2589             if (copy_to_user_timeval(optval_addr, &tv)) {
2590                 return -TARGET_EFAULT;
2591             }
2592             if (put_user_u32(len, optlen)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             break;
2596         }
2597         case TARGET_SO_SNDTIMEO:
2598             optname = SO_SNDTIMEO;
2599             goto get_timeout;
2600         case TARGET_SO_PEERCRED: {
2601             struct ucred cr;
2602             socklen_t crlen;
2603             struct target_ucred *tcr;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611 
2612             crlen = sizeof(cr);
2613             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2614                                        &cr, &crlen));
2615             if (ret < 0) {
2616                 return ret;
2617             }
2618             if (len > crlen) {
2619                 len = crlen;
2620             }
2621             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2622                 return -TARGET_EFAULT;
2623             }
2624             __put_user(cr.pid, &tcr->pid);
2625             __put_user(cr.uid, &tcr->uid);
2626             __put_user(cr.gid, &tcr->gid);
2627             unlock_user_struct(tcr, optval_addr, 1);
2628             if (put_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             break;
2632         }
2633         case TARGET_SO_PEERSEC: {
2634             char *name;
2635 
2636             if (get_user_u32(len, optlen)) {
2637                 return -TARGET_EFAULT;
2638             }
2639             if (len < 0) {
2640                 return -TARGET_EINVAL;
2641             }
2642             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2643             if (!name) {
2644                 return -TARGET_EFAULT;
2645             }
2646             lv = len;
2647             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2648                                        name, &lv));
2649             if (put_user_u32(lv, optlen)) {
2650                 ret = -TARGET_EFAULT;
2651             }
2652             unlock_user(name, optval_addr, lv);
2653             break;
2654         }
2655         case TARGET_SO_LINGER:
2656         {
2657             struct linger lg;
2658             socklen_t lglen;
2659             struct target_linger *tlg;
2660 
2661             if (get_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             if (len < 0) {
2665                 return -TARGET_EINVAL;
2666             }
2667 
2668             lglen = sizeof(lg);
2669             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2670                                        &lg, &lglen));
2671             if (ret < 0) {
2672                 return ret;
2673             }
2674             if (len > lglen) {
2675                 len = lglen;
2676             }
2677             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2678                 return -TARGET_EFAULT;
2679             }
2680             __put_user(lg.l_onoff, &tlg->l_onoff);
2681             __put_user(lg.l_linger, &tlg->l_linger);
2682             unlock_user_struct(tlg, optval_addr, 1);
2683             if (put_user_u32(len, optlen)) {
2684                 return -TARGET_EFAULT;
2685             }
2686             break;
2687         }
2688         /* Options with 'int' argument.  */
2689         case TARGET_SO_DEBUG:
2690             optname = SO_DEBUG;
2691             goto int_case;
2692         case TARGET_SO_REUSEADDR:
2693             optname = SO_REUSEADDR;
2694             goto int_case;
2695 #ifdef SO_REUSEPORT
2696         case TARGET_SO_REUSEPORT:
2697             optname = SO_REUSEPORT;
2698             goto int_case;
2699 #endif
2700         case TARGET_SO_TYPE:
2701             optname = SO_TYPE;
2702             goto int_case;
2703         case TARGET_SO_ERROR:
2704             optname = SO_ERROR;
2705             goto int_case;
2706         case TARGET_SO_DONTROUTE:
2707             optname = SO_DONTROUTE;
2708             goto int_case;
2709         case TARGET_SO_BROADCAST:
2710             optname = SO_BROADCAST;
2711             goto int_case;
2712         case TARGET_SO_SNDBUF:
2713             optname = SO_SNDBUF;
2714             goto int_case;
2715         case TARGET_SO_RCVBUF:
2716             optname = SO_RCVBUF;
2717             goto int_case;
2718         case TARGET_SO_KEEPALIVE:
2719             optname = SO_KEEPALIVE;
2720             goto int_case;
2721         case TARGET_SO_OOBINLINE:
2722             optname = SO_OOBINLINE;
2723             goto int_case;
2724         case TARGET_SO_NO_CHECK:
2725             optname = SO_NO_CHECK;
2726             goto int_case;
2727         case TARGET_SO_PRIORITY:
2728             optname = SO_PRIORITY;
2729             goto int_case;
2730 #ifdef SO_BSDCOMPAT
2731         case TARGET_SO_BSDCOMPAT:
2732             optname = SO_BSDCOMPAT;
2733             goto int_case;
2734 #endif
2735         case TARGET_SO_PASSCRED:
2736             optname = SO_PASSCRED;
2737             goto int_case;
2738         case TARGET_SO_TIMESTAMP:
2739             optname = SO_TIMESTAMP;
2740             goto int_case;
2741         case TARGET_SO_RCVLOWAT:
2742             optname = SO_RCVLOWAT;
2743             goto int_case;
2744         case TARGET_SO_ACCEPTCONN:
2745             optname = SO_ACCEPTCONN;
2746             goto int_case;
2747         case TARGET_SO_PROTOCOL:
2748             optname = SO_PROTOCOL;
2749             goto int_case;
2750         case TARGET_SO_DOMAIN:
2751             optname = SO_DOMAIN;
2752             goto int_case;
2753         default:
2754             goto int_case;
2755         }
2756         break;
2757     case SOL_TCP:
2758     case SOL_UDP:
2759         /* TCP and UDP options all take an 'int' value.  */
2760     int_case:
2761         if (get_user_u32(len, optlen))
2762             return -TARGET_EFAULT;
2763         if (len < 0)
2764             return -TARGET_EINVAL;
2765         lv = sizeof(lv);
2766         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2767         if (ret < 0)
2768             return ret;
2769         switch (optname) {
2770         case SO_TYPE:
2771             val = host_to_target_sock_type(val);
2772             break;
2773         case SO_ERROR:
2774             val = host_to_target_errno(val);
2775             break;
2776         }
2777         if (len > lv)
2778             len = lv;
2779         if (len == 4) {
2780             if (put_user_u32(val, optval_addr))
2781                 return -TARGET_EFAULT;
2782         } else {
2783             if (put_user_u8(val, optval_addr))
2784                 return -TARGET_EFAULT;
2785         }
2786         if (put_user_u32(len, optlen))
2787             return -TARGET_EFAULT;
2788         break;
2789     case SOL_IP:
2790         switch(optname) {
2791         case IP_TOS:
2792         case IP_TTL:
2793         case IP_HDRINCL:
2794         case IP_ROUTER_ALERT:
2795         case IP_RECVOPTS:
2796         case IP_RETOPTS:
2797         case IP_PKTINFO:
2798         case IP_MTU_DISCOVER:
2799         case IP_RECVERR:
2800         case IP_RECVTOS:
2801 #ifdef IP_FREEBIND
2802         case IP_FREEBIND:
2803 #endif
2804         case IP_MULTICAST_TTL:
2805         case IP_MULTICAST_LOOP:
2806             if (get_user_u32(len, optlen))
2807                 return -TARGET_EFAULT;
2808             if (len < 0)
2809                 return -TARGET_EINVAL;
2810             lv = sizeof(lv);
2811             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2812             if (ret < 0)
2813                 return ret;
2814             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2815                 len = 1;
2816                 if (put_user_u32(len, optlen)
2817                     || put_user_u8(val, optval_addr))
2818                     return -TARGET_EFAULT;
2819             } else {
2820                 if (len > sizeof(int))
2821                     len = sizeof(int);
2822                 if (put_user_u32(len, optlen)
2823                     || put_user_u32(val, optval_addr))
2824                     return -TARGET_EFAULT;
2825             }
2826             break;
2827         default:
2828             ret = -TARGET_ENOPROTOOPT;
2829             break;
2830         }
2831         break;
2832     case SOL_IPV6:
2833         switch (optname) {
2834         case IPV6_MTU_DISCOVER:
2835         case IPV6_MTU:
2836         case IPV6_V6ONLY:
2837         case IPV6_RECVPKTINFO:
2838         case IPV6_UNICAST_HOPS:
2839         case IPV6_MULTICAST_HOPS:
2840         case IPV6_MULTICAST_LOOP:
2841         case IPV6_RECVERR:
2842         case IPV6_RECVHOPLIMIT:
2843         case IPV6_2292HOPLIMIT:
2844         case IPV6_CHECKSUM:
2845         case IPV6_ADDRFORM:
2846         case IPV6_2292PKTINFO:
2847         case IPV6_RECVTCLASS:
2848         case IPV6_RECVRTHDR:
2849         case IPV6_2292RTHDR:
2850         case IPV6_RECVHOPOPTS:
2851         case IPV6_2292HOPOPTS:
2852         case IPV6_RECVDSTOPTS:
2853         case IPV6_2292DSTOPTS:
2854         case IPV6_TCLASS:
2855         case IPV6_ADDR_PREFERENCES:
2856 #ifdef IPV6_RECVPATHMTU
2857         case IPV6_RECVPATHMTU:
2858 #endif
2859 #ifdef IPV6_TRANSPARENT
2860         case IPV6_TRANSPARENT:
2861 #endif
2862 #ifdef IPV6_FREEBIND
2863         case IPV6_FREEBIND:
2864 #endif
2865 #ifdef IPV6_RECVORIGDSTADDR
2866         case IPV6_RECVORIGDSTADDR:
2867 #endif
2868             if (get_user_u32(len, optlen))
2869                 return -TARGET_EFAULT;
2870             if (len < 0)
2871                 return -TARGET_EINVAL;
2872             lv = sizeof(lv);
2873             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2874             if (ret < 0)
2875                 return ret;
2876             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2877                 len = 1;
2878                 if (put_user_u32(len, optlen)
2879                     || put_user_u8(val, optval_addr))
2880                     return -TARGET_EFAULT;
2881             } else {
2882                 if (len > sizeof(int))
2883                     len = sizeof(int);
2884                 if (put_user_u32(len, optlen)
2885                     || put_user_u32(val, optval_addr))
2886                     return -TARGET_EFAULT;
2887             }
2888             break;
2889         default:
2890             ret = -TARGET_ENOPROTOOPT;
2891             break;
2892         }
2893         break;
2894 #ifdef SOL_NETLINK
2895     case SOL_NETLINK:
2896         switch (optname) {
2897         case NETLINK_PKTINFO:
2898         case NETLINK_BROADCAST_ERROR:
2899         case NETLINK_NO_ENOBUFS:
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2901         case NETLINK_LISTEN_ALL_NSID:
2902         case NETLINK_CAP_ACK:
2903 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2904 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2905         case NETLINK_EXT_ACK:
2906 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2908         case NETLINK_GET_STRICT_CHK:
2909 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2910             if (get_user_u32(len, optlen)) {
2911                 return -TARGET_EFAULT;
2912             }
2913             if (len != sizeof(val)) {
2914                 return -TARGET_EINVAL;
2915             }
2916             lv = len;
2917             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2918             if (ret < 0) {
2919                 return ret;
2920             }
2921             if (put_user_u32(lv, optlen)
2922                 || put_user_u32(val, optval_addr)) {
2923                 return -TARGET_EFAULT;
2924             }
2925             break;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2927         case NETLINK_LIST_MEMBERSHIPS:
2928         {
2929             uint32_t *results;
2930             int i;
2931             if (get_user_u32(len, optlen)) {
2932                 return -TARGET_EFAULT;
2933             }
2934             if (len < 0) {
2935                 return -TARGET_EINVAL;
2936             }
2937             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2938             if (!results && len > 0) {
2939                 return -TARGET_EFAULT;
2940             }
2941             lv = len;
2942             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2943             if (ret < 0) {
2944                 unlock_user(results, optval_addr, 0);
2945                 return ret;
2946             }
2947             /* swap host endianess to target endianess. */
2948             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2949                 results[i] = tswap32(results[i]);
2950             }
2951             if (put_user_u32(lv, optlen)) {
2952                 return -TARGET_EFAULT;
2953             }
2954             unlock_user(results, optval_addr, 0);
2955             break;
2956         }
2957 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2958         default:
2959             goto unimplemented;
2960         }
2961         break;
2962 #endif /* SOL_NETLINK */
2963     default:
2964     unimplemented:
2965         qemu_log_mask(LOG_UNIMP,
2966                       "getsockopt level=%d optname=%d not yet supported\n",
2967                       level, optname);
2968         ret = -TARGET_EOPNOTSUPP;
2969         break;
2970     }
2971     return ret;
2972 }
2973 
2974 /* Convert target low/high pair representing file offset into the host
2975  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2976  * as the kernel doesn't handle them either.
2977  */
2978 static void target_to_host_low_high(abi_ulong tlow,
2979                                     abi_ulong thigh,
2980                                     unsigned long *hlow,
2981                                     unsigned long *hhigh)
2982 {
2983     uint64_t off = tlow |
2984         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2985         TARGET_LONG_BITS / 2;
2986 
2987     *hlow = off;
2988     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2989 }
2990 
2991 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2992                                 abi_ulong count, int copy)
2993 {
2994     struct target_iovec *target_vec;
2995     struct iovec *vec;
2996     abi_ulong total_len, max_len;
2997     int i;
2998     int err = 0;
2999     bool bad_address = false;
3000 
3001     if (count == 0) {
3002         errno = 0;
3003         return NULL;
3004     }
3005     if (count > IOV_MAX) {
3006         errno = EINVAL;
3007         return NULL;
3008     }
3009 
3010     vec = g_try_new0(struct iovec, count);
3011     if (vec == NULL) {
3012         errno = ENOMEM;
3013         return NULL;
3014     }
3015 
3016     target_vec = lock_user(VERIFY_READ, target_addr,
3017                            count * sizeof(struct target_iovec), 1);
3018     if (target_vec == NULL) {
3019         err = EFAULT;
3020         goto fail2;
3021     }
3022 
3023     /* ??? If host page size > target page size, this will result in a
3024        value larger than what we can actually support.  */
3025     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3026     total_len = 0;
3027 
3028     for (i = 0; i < count; i++) {
3029         abi_ulong base = tswapal(target_vec[i].iov_base);
3030         abi_long len = tswapal(target_vec[i].iov_len);
3031 
3032         if (len < 0) {
3033             err = EINVAL;
3034             goto fail;
3035         } else if (len == 0) {
3036             /* Zero length pointer is ignored.  */
3037             vec[i].iov_base = 0;
3038         } else {
3039             vec[i].iov_base = lock_user(type, base, len, copy);
3040             /* If the first buffer pointer is bad, this is a fault.  But
3041              * subsequent bad buffers will result in a partial write; this
3042              * is realized by filling the vector with null pointers and
3043              * zero lengths. */
3044             if (!vec[i].iov_base) {
3045                 if (i == 0) {
3046                     err = EFAULT;
3047                     goto fail;
3048                 } else {
3049                     bad_address = true;
3050                 }
3051             }
3052             if (bad_address) {
3053                 len = 0;
3054             }
3055             if (len > max_len - total_len) {
3056                 len = max_len - total_len;
3057             }
3058         }
3059         vec[i].iov_len = len;
3060         total_len += len;
3061     }
3062 
3063     unlock_user(target_vec, target_addr, 0);
3064     return vec;
3065 
3066  fail:
3067     while (--i >= 0) {
3068         if (tswapal(target_vec[i].iov_len) > 0) {
3069             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3070         }
3071     }
3072     unlock_user(target_vec, target_addr, 0);
3073  fail2:
3074     g_free(vec);
3075     errno = err;
3076     return NULL;
3077 }
3078 
3079 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3080                          abi_ulong count, int copy)
3081 {
3082     struct target_iovec *target_vec;
3083     int i;
3084 
3085     target_vec = lock_user(VERIFY_READ, target_addr,
3086                            count * sizeof(struct target_iovec), 1);
3087     if (target_vec) {
3088         for (i = 0; i < count; i++) {
3089             abi_ulong base = tswapal(target_vec[i].iov_base);
3090             abi_long len = tswapal(target_vec[i].iov_len);
3091             if (len < 0) {
3092                 break;
3093             }
3094             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3095         }
3096         unlock_user(target_vec, target_addr, 0);
3097     }
3098 
3099     g_free(vec);
3100 }
3101 
3102 static inline int target_to_host_sock_type(int *type)
3103 {
3104     int host_type = 0;
3105     int target_type = *type;
3106 
3107     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3108     case TARGET_SOCK_DGRAM:
3109         host_type = SOCK_DGRAM;
3110         break;
3111     case TARGET_SOCK_STREAM:
3112         host_type = SOCK_STREAM;
3113         break;
3114     default:
3115         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3116         break;
3117     }
3118     if (target_type & TARGET_SOCK_CLOEXEC) {
3119 #if defined(SOCK_CLOEXEC)
3120         host_type |= SOCK_CLOEXEC;
3121 #else
3122         return -TARGET_EINVAL;
3123 #endif
3124     }
3125     if (target_type & TARGET_SOCK_NONBLOCK) {
3126 #if defined(SOCK_NONBLOCK)
3127         host_type |= SOCK_NONBLOCK;
3128 #elif !defined(O_NONBLOCK)
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     *type = host_type;
3133     return 0;
3134 }
3135 
3136 /* Try to emulate socket type flags after socket creation.  */
3137 static int sock_flags_fixup(int fd, int target_type)
3138 {
3139 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3140     if (target_type & TARGET_SOCK_NONBLOCK) {
3141         int flags = fcntl(fd, F_GETFL);
3142         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3143             close(fd);
3144             return -TARGET_EINVAL;
3145         }
3146     }
3147 #endif
3148     return fd;
3149 }
3150 
3151 /* do_socket() Must return target values and target errnos. */
3152 static abi_long do_socket(int domain, int type, int protocol)
3153 {
3154     int target_type = type;
3155     int ret;
3156 
3157     ret = target_to_host_sock_type(&type);
3158     if (ret) {
3159         return ret;
3160     }
3161 
3162     if (domain == PF_NETLINK && !(
3163 #ifdef CONFIG_RTNETLINK
3164          protocol == NETLINK_ROUTE ||
3165 #endif
3166          protocol == NETLINK_KOBJECT_UEVENT ||
3167          protocol == NETLINK_AUDIT)) {
3168         return -TARGET_EPROTONOSUPPORT;
3169     }
3170 
3171     if (domain == AF_PACKET ||
3172         (domain == AF_INET && type == SOCK_PACKET)) {
3173         protocol = tswap16(protocol);
3174     }
3175 
3176     ret = get_errno(socket(domain, type, protocol));
3177     if (ret >= 0) {
3178         ret = sock_flags_fixup(ret, target_type);
3179         if (type == SOCK_PACKET) {
3180             /* Manage an obsolete case :
3181              * if socket type is SOCK_PACKET, bind by name
3182              */
3183             fd_trans_register(ret, &target_packet_trans);
3184         } else if (domain == PF_NETLINK) {
3185             switch (protocol) {
3186 #ifdef CONFIG_RTNETLINK
3187             case NETLINK_ROUTE:
3188                 fd_trans_register(ret, &target_netlink_route_trans);
3189                 break;
3190 #endif
3191             case NETLINK_KOBJECT_UEVENT:
3192                 /* nothing to do: messages are strings */
3193                 break;
3194             case NETLINK_AUDIT:
3195                 fd_trans_register(ret, &target_netlink_audit_trans);
3196                 break;
3197             default:
3198                 g_assert_not_reached();
3199             }
3200         }
3201     }
3202     return ret;
3203 }
3204 
3205 /* do_bind() Must return target values and target errnos. */
3206 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3207                         socklen_t addrlen)
3208 {
3209     void *addr;
3210     abi_long ret;
3211 
3212     if ((int)addrlen < 0) {
3213         return -TARGET_EINVAL;
3214     }
3215 
3216     addr = alloca(addrlen+1);
3217 
3218     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3219     if (ret)
3220         return ret;
3221 
3222     return get_errno(bind(sockfd, addr, addrlen));
3223 }
3224 
3225 /* do_connect() Must return target values and target errnos. */
3226 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3227                            socklen_t addrlen)
3228 {
3229     void *addr;
3230     abi_long ret;
3231 
3232     if ((int)addrlen < 0) {
3233         return -TARGET_EINVAL;
3234     }
3235 
3236     addr = alloca(addrlen+1);
3237 
3238     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3239     if (ret)
3240         return ret;
3241 
3242     return get_errno(safe_connect(sockfd, addr, addrlen));
3243 }
3244 
3245 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3246 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3247                                       int flags, int send)
3248 {
3249     abi_long ret, len;
3250     struct msghdr msg;
3251     abi_ulong count;
3252     struct iovec *vec;
3253     abi_ulong target_vec;
3254 
3255     if (msgp->msg_name) {
3256         msg.msg_namelen = tswap32(msgp->msg_namelen);
3257         msg.msg_name = alloca(msg.msg_namelen+1);
3258         ret = target_to_host_sockaddr(fd, msg.msg_name,
3259                                       tswapal(msgp->msg_name),
3260                                       msg.msg_namelen);
3261         if (ret == -TARGET_EFAULT) {
3262             /* For connected sockets msg_name and msg_namelen must
3263              * be ignored, so returning EFAULT immediately is wrong.
3264              * Instead, pass a bad msg_name to the host kernel, and
3265              * let it decide whether to return EFAULT or not.
3266              */
3267             msg.msg_name = (void *)-1;
3268         } else if (ret) {
3269             goto out2;
3270         }
3271     } else {
3272         msg.msg_name = NULL;
3273         msg.msg_namelen = 0;
3274     }
3275     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3276     msg.msg_control = alloca(msg.msg_controllen);
3277     memset(msg.msg_control, 0, msg.msg_controllen);
3278 
3279     msg.msg_flags = tswap32(msgp->msg_flags);
3280 
3281     count = tswapal(msgp->msg_iovlen);
3282     target_vec = tswapal(msgp->msg_iov);
3283 
3284     if (count > IOV_MAX) {
3285         /* sendrcvmsg returns a different errno for this condition than
3286          * readv/writev, so we must catch it here before lock_iovec() does.
3287          */
3288         ret = -TARGET_EMSGSIZE;
3289         goto out2;
3290     }
3291 
3292     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3293                      target_vec, count, send);
3294     if (vec == NULL) {
3295         ret = -host_to_target_errno(errno);
3296         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3297         if (!send || ret) {
3298             goto out2;
3299         }
3300     }
3301     msg.msg_iovlen = count;
3302     msg.msg_iov = vec;
3303 
3304     if (send) {
3305         if (fd_trans_target_to_host_data(fd)) {
3306             void *host_msg;
3307 
3308             host_msg = g_malloc(msg.msg_iov->iov_len);
3309             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3310             ret = fd_trans_target_to_host_data(fd)(host_msg,
3311                                                    msg.msg_iov->iov_len);
3312             if (ret >= 0) {
3313                 msg.msg_iov->iov_base = host_msg;
3314                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3315             }
3316             g_free(host_msg);
3317         } else {
3318             ret = target_to_host_cmsg(&msg, msgp);
3319             if (ret == 0) {
3320                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3321             }
3322         }
3323     } else {
3324         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3325         if (!is_error(ret)) {
3326             len = ret;
3327             if (fd_trans_host_to_target_data(fd)) {
3328                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3329                                                MIN(msg.msg_iov->iov_len, len));
3330             }
3331             if (!is_error(ret)) {
3332                 ret = host_to_target_cmsg(msgp, &msg);
3333             }
3334             if (!is_error(ret)) {
3335                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3336                 msgp->msg_flags = tswap32(msg.msg_flags);
3337                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3338                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3339                                     msg.msg_name, msg.msg_namelen);
3340                     if (ret) {
3341                         goto out;
3342                     }
3343                 }
3344 
3345                 ret = len;
3346             }
3347         }
3348     }
3349 
3350 out:
3351     if (vec) {
3352         unlock_iovec(vec, target_vec, count, !send);
3353     }
3354 out2:
3355     return ret;
3356 }
3357 
3358 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3359                                int flags, int send)
3360 {
3361     abi_long ret;
3362     struct target_msghdr *msgp;
3363 
3364     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3365                           msgp,
3366                           target_msg,
3367                           send ? 1 : 0)) {
3368         return -TARGET_EFAULT;
3369     }
3370     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3371     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3372     return ret;
3373 }
3374 
3375 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3376  * so it might not have this *mmsg-specific flag either.
3377  */
3378 #ifndef MSG_WAITFORONE
3379 #define MSG_WAITFORONE 0x10000
3380 #endif
3381 
3382 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3383                                 unsigned int vlen, unsigned int flags,
3384                                 int send)
3385 {
3386     struct target_mmsghdr *mmsgp;
3387     abi_long ret = 0;
3388     int i;
3389 
3390     if (vlen > UIO_MAXIOV) {
3391         vlen = UIO_MAXIOV;
3392     }
3393 
3394     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3395     if (!mmsgp) {
3396         return -TARGET_EFAULT;
3397     }
3398 
3399     for (i = 0; i < vlen; i++) {
3400         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3401         if (is_error(ret)) {
3402             break;
3403         }
3404         mmsgp[i].msg_len = tswap32(ret);
3405         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3406         if (flags & MSG_WAITFORONE) {
3407             flags |= MSG_DONTWAIT;
3408         }
3409     }
3410 
3411     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3412 
3413     /* Return number of datagrams sent if we sent any at all;
3414      * otherwise return the error.
3415      */
3416     if (i) {
3417         return i;
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_accept4() Must return target values and target errnos. */
3423 static abi_long do_accept4(int fd, abi_ulong target_addr,
3424                            abi_ulong target_addrlen_addr, int flags)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429     int host_flags;
3430 
3431     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3432 
3433     if (target_addr == 0) {
3434         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3435     }
3436 
3437     /* linux returns EFAULT if addrlen pointer is invalid */
3438     if (get_user_u32(addrlen, target_addrlen_addr))
3439         return -TARGET_EFAULT;
3440 
3441     if ((int)addrlen < 0) {
3442         return -TARGET_EINVAL;
3443     }
3444 
3445     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3446         return -TARGET_EFAULT;
3447     }
3448 
3449     addr = alloca(addrlen);
3450 
3451     ret_addrlen = addrlen;
3452     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3453     if (!is_error(ret)) {
3454         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3455         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3456             ret = -TARGET_EFAULT;
3457         }
3458     }
3459     return ret;
3460 }
3461 
3462 /* do_getpeername() Must return target values and target errnos. */
3463 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3464                                abi_ulong target_addrlen_addr)
3465 {
3466     socklen_t addrlen, ret_addrlen;
3467     void *addr;
3468     abi_long ret;
3469 
3470     if (get_user_u32(addrlen, target_addrlen_addr))
3471         return -TARGET_EFAULT;
3472 
3473     if ((int)addrlen < 0) {
3474         return -TARGET_EINVAL;
3475     }
3476 
3477     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3478         return -TARGET_EFAULT;
3479     }
3480 
3481     addr = alloca(addrlen);
3482 
3483     ret_addrlen = addrlen;
3484     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3485     if (!is_error(ret)) {
3486         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3487         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3488             ret = -TARGET_EFAULT;
3489         }
3490     }
3491     return ret;
3492 }
3493 
3494 /* do_getsockname() Must return target values and target errnos. */
3495 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3496                                abi_ulong target_addrlen_addr)
3497 {
3498     socklen_t addrlen, ret_addrlen;
3499     void *addr;
3500     abi_long ret;
3501 
3502     if (get_user_u32(addrlen, target_addrlen_addr))
3503         return -TARGET_EFAULT;
3504 
3505     if ((int)addrlen < 0) {
3506         return -TARGET_EINVAL;
3507     }
3508 
3509     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3510         return -TARGET_EFAULT;
3511     }
3512 
3513     addr = alloca(addrlen);
3514 
3515     ret_addrlen = addrlen;
3516     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3517     if (!is_error(ret)) {
3518         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3519         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3520             ret = -TARGET_EFAULT;
3521         }
3522     }
3523     return ret;
3524 }
3525 
3526 /* do_socketpair() Must return target values and target errnos. */
3527 static abi_long do_socketpair(int domain, int type, int protocol,
3528                               abi_ulong target_tab_addr)
3529 {
3530     int tab[2];
3531     abi_long ret;
3532 
3533     target_to_host_sock_type(&type);
3534 
3535     ret = get_errno(socketpair(domain, type, protocol, tab));
3536     if (!is_error(ret)) {
3537         if (put_user_s32(tab[0], target_tab_addr)
3538             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3539             ret = -TARGET_EFAULT;
3540     }
3541     return ret;
3542 }
3543 
3544 /* do_sendto() Must return target values and target errnos. */
3545 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3546                           abi_ulong target_addr, socklen_t addrlen)
3547 {
3548     void *addr;
3549     void *host_msg;
3550     void *copy_msg = NULL;
3551     abi_long ret;
3552 
3553     if ((int)addrlen < 0) {
3554         return -TARGET_EINVAL;
3555     }
3556 
3557     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3558     if (!host_msg)
3559         return -TARGET_EFAULT;
3560     if (fd_trans_target_to_host_data(fd)) {
3561         copy_msg = host_msg;
3562         host_msg = g_malloc(len);
3563         memcpy(host_msg, copy_msg, len);
3564         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3565         if (ret < 0) {
3566             goto fail;
3567         }
3568     }
3569     if (target_addr) {
3570         addr = alloca(addrlen+1);
3571         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3572         if (ret) {
3573             goto fail;
3574         }
3575         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3576     } else {
3577         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3578     }
3579 fail:
3580     if (copy_msg) {
3581         g_free(host_msg);
3582         host_msg = copy_msg;
3583     }
3584     unlock_user(host_msg, msg, 0);
3585     return ret;
3586 }
3587 
3588 /* do_recvfrom() Must return target values and target errnos. */
3589 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3590                             abi_ulong target_addr,
3591                             abi_ulong target_addrlen)
3592 {
3593     socklen_t addrlen, ret_addrlen;
3594     void *addr;
3595     void *host_msg;
3596     abi_long ret;
3597 
3598     if (!msg) {
3599         host_msg = NULL;
3600     } else {
3601         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3602         if (!host_msg) {
3603             return -TARGET_EFAULT;
3604         }
3605     }
3606     if (target_addr) {
3607         if (get_user_u32(addrlen, target_addrlen)) {
3608             ret = -TARGET_EFAULT;
3609             goto fail;
3610         }
3611         if ((int)addrlen < 0) {
3612             ret = -TARGET_EINVAL;
3613             goto fail;
3614         }
3615         addr = alloca(addrlen);
3616         ret_addrlen = addrlen;
3617         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3618                                       addr, &ret_addrlen));
3619     } else {
3620         addr = NULL; /* To keep compiler quiet.  */
3621         addrlen = 0; /* To keep compiler quiet.  */
3622         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3623     }
3624     if (!is_error(ret)) {
3625         if (fd_trans_host_to_target_data(fd)) {
3626             abi_long trans;
3627             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3628             if (is_error(trans)) {
3629                 ret = trans;
3630                 goto fail;
3631             }
3632         }
3633         if (target_addr) {
3634             host_to_target_sockaddr(target_addr, addr,
3635                                     MIN(addrlen, ret_addrlen));
3636             if (put_user_u32(ret_addrlen, target_addrlen)) {
3637                 ret = -TARGET_EFAULT;
3638                 goto fail;
3639             }
3640         }
3641         unlock_user(host_msg, msg, len);
3642     } else {
3643 fail:
3644         unlock_user(host_msg, msg, 0);
3645     }
3646     return ret;
3647 }
3648 
3649 #ifdef TARGET_NR_socketcall
3650 /* do_socketcall() must return target values and target errnos. */
3651 static abi_long do_socketcall(int num, abi_ulong vptr)
3652 {
3653     static const unsigned nargs[] = { /* number of arguments per operation */
3654         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3655         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3656         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3657         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3658         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3659         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3660         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3661         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3662         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3663         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3664         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3665         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3666         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3667         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3668         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3669         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3670         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3671         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3672         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3673         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3674     };
3675     abi_long a[6]; /* max 6 args */
3676     unsigned i;
3677 
3678     /* check the range of the first argument num */
3679     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3680     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3681         return -TARGET_EINVAL;
3682     }
3683     /* ensure we have space for args */
3684     if (nargs[num] > ARRAY_SIZE(a)) {
3685         return -TARGET_EINVAL;
3686     }
3687     /* collect the arguments in a[] according to nargs[] */
3688     for (i = 0; i < nargs[num]; ++i) {
3689         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3690             return -TARGET_EFAULT;
3691         }
3692     }
3693     /* now when we have the args, invoke the appropriate underlying function */
3694     switch (num) {
3695     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3696         return do_socket(a[0], a[1], a[2]);
3697     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3698         return do_bind(a[0], a[1], a[2]);
3699     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3700         return do_connect(a[0], a[1], a[2]);
3701     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3702         return get_errno(listen(a[0], a[1]));
3703     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3704         return do_accept4(a[0], a[1], a[2], 0);
3705     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3706         return do_getsockname(a[0], a[1], a[2]);
3707     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3708         return do_getpeername(a[0], a[1], a[2]);
3709     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3710         return do_socketpair(a[0], a[1], a[2], a[3]);
3711     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3712         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3713     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3714         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3715     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3716         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3717     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3718         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3719     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3720         return get_errno(shutdown(a[0], a[1]));
3721     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3722         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3723     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3724         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3725     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3726         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3727     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3728         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3729     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3730         return do_accept4(a[0], a[1], a[2], a[3]);
3731     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3732         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3733     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3734         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3735     default:
3736         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3737         return -TARGET_EINVAL;
3738     }
3739 }
3740 #endif
3741 
3742 #define N_SHM_REGIONS	32
3743 
3744 static struct shm_region {
3745     abi_ulong start;
3746     abi_ulong size;
3747     bool in_use;
3748 } shm_regions[N_SHM_REGIONS];
3749 
3750 #ifndef TARGET_SEMID64_DS
3751 /* asm-generic version of this struct */
3752 struct target_semid64_ds
3753 {
3754   struct target_ipc_perm sem_perm;
3755   abi_ulong sem_otime;
3756 #if TARGET_ABI_BITS == 32
3757   abi_ulong __unused1;
3758 #endif
3759   abi_ulong sem_ctime;
3760 #if TARGET_ABI_BITS == 32
3761   abi_ulong __unused2;
3762 #endif
3763   abi_ulong sem_nsems;
3764   abi_ulong __unused3;
3765   abi_ulong __unused4;
3766 };
3767 #endif
3768 
3769 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3770                                                abi_ulong target_addr)
3771 {
3772     struct target_ipc_perm *target_ip;
3773     struct target_semid64_ds *target_sd;
3774 
3775     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3776         return -TARGET_EFAULT;
3777     target_ip = &(target_sd->sem_perm);
3778     host_ip->__key = tswap32(target_ip->__key);
3779     host_ip->uid = tswap32(target_ip->uid);
3780     host_ip->gid = tswap32(target_ip->gid);
3781     host_ip->cuid = tswap32(target_ip->cuid);
3782     host_ip->cgid = tswap32(target_ip->cgid);
3783 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3784     host_ip->mode = tswap32(target_ip->mode);
3785 #else
3786     host_ip->mode = tswap16(target_ip->mode);
3787 #endif
3788 #if defined(TARGET_PPC)
3789     host_ip->__seq = tswap32(target_ip->__seq);
3790 #else
3791     host_ip->__seq = tswap16(target_ip->__seq);
3792 #endif
3793     unlock_user_struct(target_sd, target_addr, 0);
3794     return 0;
3795 }
3796 
3797 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3798                                                struct ipc_perm *host_ip)
3799 {
3800     struct target_ipc_perm *target_ip;
3801     struct target_semid64_ds *target_sd;
3802 
3803     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3804         return -TARGET_EFAULT;
3805     target_ip = &(target_sd->sem_perm);
3806     target_ip->__key = tswap32(host_ip->__key);
3807     target_ip->uid = tswap32(host_ip->uid);
3808     target_ip->gid = tswap32(host_ip->gid);
3809     target_ip->cuid = tswap32(host_ip->cuid);
3810     target_ip->cgid = tswap32(host_ip->cgid);
3811 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3812     target_ip->mode = tswap32(host_ip->mode);
3813 #else
3814     target_ip->mode = tswap16(host_ip->mode);
3815 #endif
3816 #if defined(TARGET_PPC)
3817     target_ip->__seq = tswap32(host_ip->__seq);
3818 #else
3819     target_ip->__seq = tswap16(host_ip->__seq);
3820 #endif
3821     unlock_user_struct(target_sd, target_addr, 1);
3822     return 0;
3823 }
3824 
3825 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3826                                                abi_ulong target_addr)
3827 {
3828     struct target_semid64_ds *target_sd;
3829 
3830     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3831         return -TARGET_EFAULT;
3832     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3833         return -TARGET_EFAULT;
3834     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3835     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3836     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3837     unlock_user_struct(target_sd, target_addr, 0);
3838     return 0;
3839 }
3840 
3841 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3842                                                struct semid_ds *host_sd)
3843 {
3844     struct target_semid64_ds *target_sd;
3845 
3846     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3847         return -TARGET_EFAULT;
3848     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3849         return -TARGET_EFAULT;
3850     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3851     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3852     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3853     unlock_user_struct(target_sd, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 struct target_seminfo {
3858     int semmap;
3859     int semmni;
3860     int semmns;
3861     int semmnu;
3862     int semmsl;
3863     int semopm;
3864     int semume;
3865     int semusz;
3866     int semvmx;
3867     int semaem;
3868 };
3869 
3870 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3871                                               struct seminfo *host_seminfo)
3872 {
3873     struct target_seminfo *target_seminfo;
3874     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3875         return -TARGET_EFAULT;
3876     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3877     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3878     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3879     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3880     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3881     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3882     __put_user(host_seminfo->semume, &target_seminfo->semume);
3883     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3884     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3885     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3886     unlock_user_struct(target_seminfo, target_addr, 1);
3887     return 0;
3888 }
3889 
3890 union semun {
3891 	int val;
3892 	struct semid_ds *buf;
3893 	unsigned short *array;
3894 	struct seminfo *__buf;
3895 };
3896 
3897 union target_semun {
3898 	int val;
3899 	abi_ulong buf;
3900 	abi_ulong array;
3901 	abi_ulong __buf;
3902 };
3903 
3904 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3905                                                abi_ulong target_addr)
3906 {
3907     int nsems;
3908     unsigned short *array;
3909     union semun semun;
3910     struct semid_ds semid_ds;
3911     int i, ret;
3912 
3913     semun.buf = &semid_ds;
3914 
3915     ret = semctl(semid, 0, IPC_STAT, semun);
3916     if (ret == -1)
3917         return get_errno(ret);
3918 
3919     nsems = semid_ds.sem_nsems;
3920 
3921     *host_array = g_try_new(unsigned short, nsems);
3922     if (!*host_array) {
3923         return -TARGET_ENOMEM;
3924     }
3925     array = lock_user(VERIFY_READ, target_addr,
3926                       nsems*sizeof(unsigned short), 1);
3927     if (!array) {
3928         g_free(*host_array);
3929         return -TARGET_EFAULT;
3930     }
3931 
3932     for(i=0; i<nsems; i++) {
3933         __get_user((*host_array)[i], &array[i]);
3934     }
3935     unlock_user(array, target_addr, 0);
3936 
3937     return 0;
3938 }
3939 
3940 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3941                                                unsigned short **host_array)
3942 {
3943     int nsems;
3944     unsigned short *array;
3945     union semun semun;
3946     struct semid_ds semid_ds;
3947     int i, ret;
3948 
3949     semun.buf = &semid_ds;
3950 
3951     ret = semctl(semid, 0, IPC_STAT, semun);
3952     if (ret == -1)
3953         return get_errno(ret);
3954 
3955     nsems = semid_ds.sem_nsems;
3956 
3957     array = lock_user(VERIFY_WRITE, target_addr,
3958                       nsems*sizeof(unsigned short), 0);
3959     if (!array)
3960         return -TARGET_EFAULT;
3961 
3962     for(i=0; i<nsems; i++) {
3963         __put_user((*host_array)[i], &array[i]);
3964     }
3965     g_free(*host_array);
3966     unlock_user(array, target_addr, 1);
3967 
3968     return 0;
3969 }
3970 
3971 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3972                                  abi_ulong target_arg)
3973 {
3974     union target_semun target_su = { .buf = target_arg };
3975     union semun arg;
3976     struct semid_ds dsarg;
3977     unsigned short *array = NULL;
3978     struct seminfo seminfo;
3979     abi_long ret = -TARGET_EINVAL;
3980     abi_long err;
3981     cmd &= 0xff;
3982 
3983     switch( cmd ) {
3984 	case GETVAL:
3985 	case SETVAL:
3986             /* In 64 bit cross-endian situations, we will erroneously pick up
3987              * the wrong half of the union for the "val" element.  To rectify
3988              * this, the entire 8-byte structure is byteswapped, followed by
3989 	     * a swap of the 4 byte val field. In other cases, the data is
3990 	     * already in proper host byte order. */
3991 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3992 		target_su.buf = tswapal(target_su.buf);
3993 		arg.val = tswap32(target_su.val);
3994 	    } else {
3995 		arg.val = target_su.val;
3996 	    }
3997             ret = get_errno(semctl(semid, semnum, cmd, arg));
3998             break;
3999 	case GETALL:
4000 	case SETALL:
4001             err = target_to_host_semarray(semid, &array, target_su.array);
4002             if (err)
4003                 return err;
4004             arg.array = array;
4005             ret = get_errno(semctl(semid, semnum, cmd, arg));
4006             err = host_to_target_semarray(semid, target_su.array, &array);
4007             if (err)
4008                 return err;
4009             break;
4010 	case IPC_STAT:
4011 	case IPC_SET:
4012 	case SEM_STAT:
4013             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4014             if (err)
4015                 return err;
4016             arg.buf = &dsarg;
4017             ret = get_errno(semctl(semid, semnum, cmd, arg));
4018             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4019             if (err)
4020                 return err;
4021             break;
4022 	case IPC_INFO:
4023 	case SEM_INFO:
4024             arg.__buf = &seminfo;
4025             ret = get_errno(semctl(semid, semnum, cmd, arg));
4026             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4027             if (err)
4028                 return err;
4029             break;
4030 	case IPC_RMID:
4031 	case GETPID:
4032 	case GETNCNT:
4033 	case GETZCNT:
4034             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4035             break;
4036     }
4037 
4038     return ret;
4039 }
4040 
4041 struct target_sembuf {
4042     unsigned short sem_num;
4043     short sem_op;
4044     short sem_flg;
4045 };
4046 
4047 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4048                                              abi_ulong target_addr,
4049                                              unsigned nsops)
4050 {
4051     struct target_sembuf *target_sembuf;
4052     int i;
4053 
4054     target_sembuf = lock_user(VERIFY_READ, target_addr,
4055                               nsops*sizeof(struct target_sembuf), 1);
4056     if (!target_sembuf)
4057         return -TARGET_EFAULT;
4058 
4059     for(i=0; i<nsops; i++) {
4060         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4061         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4062         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4063     }
4064 
4065     unlock_user(target_sembuf, target_addr, 0);
4066 
4067     return 0;
4068 }
4069 
4070 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4071     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4072 
4073 /*
4074  * This macro is required to handle the s390 variants, which passes the
4075  * arguments in a different order than default.
4076  */
4077 #ifdef __s390x__
4078 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4079   (__nsops), (__timeout), (__sops)
4080 #else
4081 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4082   (__nsops), 0, (__sops), (__timeout)
4083 #endif
4084 
4085 static inline abi_long do_semtimedop(int semid,
4086                                      abi_long ptr,
4087                                      unsigned nsops,
4088                                      abi_long timeout, bool time64)
4089 {
4090     struct sembuf *sops;
4091     struct timespec ts, *pts = NULL;
4092     abi_long ret;
4093 
4094     if (timeout) {
4095         pts = &ts;
4096         if (time64) {
4097             if (target_to_host_timespec64(pts, timeout)) {
4098                 return -TARGET_EFAULT;
4099             }
4100         } else {
4101             if (target_to_host_timespec(pts, timeout)) {
4102                 return -TARGET_EFAULT;
4103             }
4104         }
4105     }
4106 
4107     if (nsops > TARGET_SEMOPM) {
4108         return -TARGET_E2BIG;
4109     }
4110 
4111     sops = g_new(struct sembuf, nsops);
4112 
4113     if (target_to_host_sembuf(sops, ptr, nsops)) {
4114         g_free(sops);
4115         return -TARGET_EFAULT;
4116     }
4117 
4118     ret = -TARGET_ENOSYS;
4119 #ifdef __NR_semtimedop
4120     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4121 #endif
4122 #ifdef __NR_ipc
4123     if (ret == -TARGET_ENOSYS) {
4124         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4125                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4126     }
4127 #endif
4128     g_free(sops);
4129     return ret;
4130 }
4131 #endif
4132 
4133 struct target_msqid_ds
4134 {
4135     struct target_ipc_perm msg_perm;
4136     abi_ulong msg_stime;
4137 #if TARGET_ABI_BITS == 32
4138     abi_ulong __unused1;
4139 #endif
4140     abi_ulong msg_rtime;
4141 #if TARGET_ABI_BITS == 32
4142     abi_ulong __unused2;
4143 #endif
4144     abi_ulong msg_ctime;
4145 #if TARGET_ABI_BITS == 32
4146     abi_ulong __unused3;
4147 #endif
4148     abi_ulong __msg_cbytes;
4149     abi_ulong msg_qnum;
4150     abi_ulong msg_qbytes;
4151     abi_ulong msg_lspid;
4152     abi_ulong msg_lrpid;
4153     abi_ulong __unused4;
4154     abi_ulong __unused5;
4155 };
4156 
4157 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4158                                                abi_ulong target_addr)
4159 {
4160     struct target_msqid_ds *target_md;
4161 
4162     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4163         return -TARGET_EFAULT;
4164     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4165         return -TARGET_EFAULT;
4166     host_md->msg_stime = tswapal(target_md->msg_stime);
4167     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4168     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4169     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4170     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4171     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4172     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4173     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4174     unlock_user_struct(target_md, target_addr, 0);
4175     return 0;
4176 }
4177 
4178 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4179                                                struct msqid_ds *host_md)
4180 {
4181     struct target_msqid_ds *target_md;
4182 
4183     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4184         return -TARGET_EFAULT;
4185     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4186         return -TARGET_EFAULT;
4187     target_md->msg_stime = tswapal(host_md->msg_stime);
4188     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4189     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4190     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4191     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4192     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4193     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4194     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4195     unlock_user_struct(target_md, target_addr, 1);
4196     return 0;
4197 }
4198 
4199 struct target_msginfo {
4200     int msgpool;
4201     int msgmap;
4202     int msgmax;
4203     int msgmnb;
4204     int msgmni;
4205     int msgssz;
4206     int msgtql;
4207     unsigned short int msgseg;
4208 };
4209 
4210 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4211                                               struct msginfo *host_msginfo)
4212 {
4213     struct target_msginfo *target_msginfo;
4214     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4215         return -TARGET_EFAULT;
4216     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4217     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4218     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4219     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4220     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4221     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4222     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4223     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4224     unlock_user_struct(target_msginfo, target_addr, 1);
4225     return 0;
4226 }
4227 
4228 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4229 {
4230     struct msqid_ds dsarg;
4231     struct msginfo msginfo;
4232     abi_long ret = -TARGET_EINVAL;
4233 
4234     cmd &= 0xff;
4235 
4236     switch (cmd) {
4237     case IPC_STAT:
4238     case IPC_SET:
4239     case MSG_STAT:
4240         if (target_to_host_msqid_ds(&dsarg,ptr))
4241             return -TARGET_EFAULT;
4242         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4243         if (host_to_target_msqid_ds(ptr,&dsarg))
4244             return -TARGET_EFAULT;
4245         break;
4246     case IPC_RMID:
4247         ret = get_errno(msgctl(msgid, cmd, NULL));
4248         break;
4249     case IPC_INFO:
4250     case MSG_INFO:
4251         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4252         if (host_to_target_msginfo(ptr, &msginfo))
4253             return -TARGET_EFAULT;
4254         break;
4255     }
4256 
4257     return ret;
4258 }
4259 
4260 struct target_msgbuf {
4261     abi_long mtype;
4262     char	mtext[1];
4263 };
4264 
4265 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4266                                  ssize_t msgsz, int msgflg)
4267 {
4268     struct target_msgbuf *target_mb;
4269     struct msgbuf *host_mb;
4270     abi_long ret = 0;
4271 
4272     if (msgsz < 0) {
4273         return -TARGET_EINVAL;
4274     }
4275 
4276     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4277         return -TARGET_EFAULT;
4278     host_mb = g_try_malloc(msgsz + sizeof(long));
4279     if (!host_mb) {
4280         unlock_user_struct(target_mb, msgp, 0);
4281         return -TARGET_ENOMEM;
4282     }
4283     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4284     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4285     ret = -TARGET_ENOSYS;
4286 #ifdef __NR_msgsnd
4287     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4288 #endif
4289 #ifdef __NR_ipc
4290     if (ret == -TARGET_ENOSYS) {
4291 #ifdef __s390x__
4292         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4293                                  host_mb));
4294 #else
4295         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4296                                  host_mb, 0));
4297 #endif
4298     }
4299 #endif
4300     g_free(host_mb);
4301     unlock_user_struct(target_mb, msgp, 0);
4302 
4303     return ret;
4304 }
4305 
4306 #ifdef __NR_ipc
4307 #if defined(__sparc__)
4308 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4309 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4310 #elif defined(__s390x__)
4311 /* The s390 sys_ipc variant has only five parameters.  */
4312 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4313     ((long int[]){(long int)__msgp, __msgtyp})
4314 #else
4315 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4316     ((long int[]){(long int)__msgp, __msgtyp}), 0
4317 #endif
4318 #endif
4319 
4320 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4321                                  ssize_t msgsz, abi_long msgtyp,
4322                                  int msgflg)
4323 {
4324     struct target_msgbuf *target_mb;
4325     char *target_mtext;
4326     struct msgbuf *host_mb;
4327     abi_long ret = 0;
4328 
4329     if (msgsz < 0) {
4330         return -TARGET_EINVAL;
4331     }
4332 
4333     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4334         return -TARGET_EFAULT;
4335 
4336     host_mb = g_try_malloc(msgsz + sizeof(long));
4337     if (!host_mb) {
4338         ret = -TARGET_ENOMEM;
4339         goto end;
4340     }
4341     ret = -TARGET_ENOSYS;
4342 #ifdef __NR_msgrcv
4343     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4344 #endif
4345 #ifdef __NR_ipc
4346     if (ret == -TARGET_ENOSYS) {
4347         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4348                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4349     }
4350 #endif
4351 
4352     if (ret > 0) {
4353         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4354         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4355         if (!target_mtext) {
4356             ret = -TARGET_EFAULT;
4357             goto end;
4358         }
4359         memcpy(target_mb->mtext, host_mb->mtext, ret);
4360         unlock_user(target_mtext, target_mtext_addr, ret);
4361     }
4362 
4363     target_mb->mtype = tswapal(host_mb->mtype);
4364 
4365 end:
4366     if (target_mb)
4367         unlock_user_struct(target_mb, msgp, 1);
4368     g_free(host_mb);
4369     return ret;
4370 }
4371 
4372 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4373                                                abi_ulong target_addr)
4374 {
4375     struct target_shmid_ds *target_sd;
4376 
4377     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4378         return -TARGET_EFAULT;
4379     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4380         return -TARGET_EFAULT;
4381     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4382     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4383     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4384     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4385     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4386     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4387     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4388     unlock_user_struct(target_sd, target_addr, 0);
4389     return 0;
4390 }
4391 
4392 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4393                                                struct shmid_ds *host_sd)
4394 {
4395     struct target_shmid_ds *target_sd;
4396 
4397     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4398         return -TARGET_EFAULT;
4399     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4400         return -TARGET_EFAULT;
4401     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4402     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4403     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4404     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4405     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4406     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4407     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4408     unlock_user_struct(target_sd, target_addr, 1);
4409     return 0;
4410 }
4411 
4412 struct  target_shminfo {
4413     abi_ulong shmmax;
4414     abi_ulong shmmin;
4415     abi_ulong shmmni;
4416     abi_ulong shmseg;
4417     abi_ulong shmall;
4418 };
4419 
4420 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4421                                               struct shminfo *host_shminfo)
4422 {
4423     struct target_shminfo *target_shminfo;
4424     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4425         return -TARGET_EFAULT;
4426     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4427     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4428     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4429     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4430     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4431     unlock_user_struct(target_shminfo, target_addr, 1);
4432     return 0;
4433 }
4434 
4435 struct target_shm_info {
4436     int used_ids;
4437     abi_ulong shm_tot;
4438     abi_ulong shm_rss;
4439     abi_ulong shm_swp;
4440     abi_ulong swap_attempts;
4441     abi_ulong swap_successes;
4442 };
4443 
4444 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4445                                                struct shm_info *host_shm_info)
4446 {
4447     struct target_shm_info *target_shm_info;
4448     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4449         return -TARGET_EFAULT;
4450     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4451     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4452     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4453     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4454     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4455     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4456     unlock_user_struct(target_shm_info, target_addr, 1);
4457     return 0;
4458 }
4459 
4460 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4461 {
4462     struct shmid_ds dsarg;
4463     struct shminfo shminfo;
4464     struct shm_info shm_info;
4465     abi_long ret = -TARGET_EINVAL;
4466 
4467     cmd &= 0xff;
4468 
4469     switch(cmd) {
4470     case IPC_STAT:
4471     case IPC_SET:
4472     case SHM_STAT:
4473         if (target_to_host_shmid_ds(&dsarg, buf))
4474             return -TARGET_EFAULT;
4475         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4476         if (host_to_target_shmid_ds(buf, &dsarg))
4477             return -TARGET_EFAULT;
4478         break;
4479     case IPC_INFO:
4480         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4481         if (host_to_target_shminfo(buf, &shminfo))
4482             return -TARGET_EFAULT;
4483         break;
4484     case SHM_INFO:
4485         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4486         if (host_to_target_shm_info(buf, &shm_info))
4487             return -TARGET_EFAULT;
4488         break;
4489     case IPC_RMID:
4490     case SHM_LOCK:
4491     case SHM_UNLOCK:
4492         ret = get_errno(shmctl(shmid, cmd, NULL));
4493         break;
4494     }
4495 
4496     return ret;
4497 }
4498 
4499 #ifndef TARGET_FORCE_SHMLBA
4500 /* For most architectures, SHMLBA is the same as the page size;
4501  * some architectures have larger values, in which case they should
4502  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4503  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4504  * and defining its own value for SHMLBA.
4505  *
4506  * The kernel also permits SHMLBA to be set by the architecture to a
4507  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4508  * this means that addresses are rounded to the large size if
4509  * SHM_RND is set but addresses not aligned to that size are not rejected
4510  * as long as they are at least page-aligned. Since the only architecture
4511  * which uses this is ia64 this code doesn't provide for that oddity.
4512  */
4513 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4514 {
4515     return TARGET_PAGE_SIZE;
4516 }
4517 #endif
4518 
4519 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4520                                  int shmid, abi_ulong shmaddr, int shmflg)
4521 {
4522     CPUState *cpu = env_cpu(cpu_env);
4523     abi_long raddr;
4524     void *host_raddr;
4525     struct shmid_ds shm_info;
4526     int i,ret;
4527     abi_ulong shmlba;
4528 
4529     /* shmat pointers are always untagged */
4530 
4531     /* find out the length of the shared memory segment */
4532     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4533     if (is_error(ret)) {
4534         /* can't get length, bail out */
4535         return ret;
4536     }
4537 
4538     shmlba = target_shmlba(cpu_env);
4539 
4540     if (shmaddr & (shmlba - 1)) {
4541         if (shmflg & SHM_RND) {
4542             shmaddr &= ~(shmlba - 1);
4543         } else {
4544             return -TARGET_EINVAL;
4545         }
4546     }
4547     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4548         return -TARGET_EINVAL;
4549     }
4550 
4551     mmap_lock();
4552 
4553     /*
4554      * We're mapping shared memory, so ensure we generate code for parallel
4555      * execution and flush old translations.  This will work up to the level
4556      * supported by the host -- anything that requires EXCP_ATOMIC will not
4557      * be atomic with respect to an external process.
4558      */
4559     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4560         cpu->tcg_cflags |= CF_PARALLEL;
4561         tb_flush(cpu);
4562     }
4563 
4564     if (shmaddr)
4565         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4566     else {
4567         abi_ulong mmap_start;
4568 
4569         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4570         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4571 
4572         if (mmap_start == -1) {
4573             errno = ENOMEM;
4574             host_raddr = (void *)-1;
4575         } else
4576             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4577                                shmflg | SHM_REMAP);
4578     }
4579 
4580     if (host_raddr == (void *)-1) {
4581         mmap_unlock();
4582         return get_errno((long)host_raddr);
4583     }
4584     raddr=h2g((unsigned long)host_raddr);
4585 
4586     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4587                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4588                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4589 
4590     for (i = 0; i < N_SHM_REGIONS; i++) {
4591         if (!shm_regions[i].in_use) {
4592             shm_regions[i].in_use = true;
4593             shm_regions[i].start = raddr;
4594             shm_regions[i].size = shm_info.shm_segsz;
4595             break;
4596         }
4597     }
4598 
4599     mmap_unlock();
4600     return raddr;
4601 
4602 }
4603 
4604 static inline abi_long do_shmdt(abi_ulong shmaddr)
4605 {
4606     int i;
4607     abi_long rv;
4608 
4609     /* shmdt pointers are always untagged */
4610 
4611     mmap_lock();
4612 
4613     for (i = 0; i < N_SHM_REGIONS; ++i) {
4614         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4615             shm_regions[i].in_use = false;
4616             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4617             break;
4618         }
4619     }
4620     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4621 
4622     mmap_unlock();
4623 
4624     return rv;
4625 }
4626 
4627 #ifdef TARGET_NR_ipc
4628 /* ??? This only works with linear mappings.  */
4629 /* do_ipc() must return target values and target errnos. */
4630 static abi_long do_ipc(CPUArchState *cpu_env,
4631                        unsigned int call, abi_long first,
4632                        abi_long second, abi_long third,
4633                        abi_long ptr, abi_long fifth)
4634 {
4635     int version;
4636     abi_long ret = 0;
4637 
4638     version = call >> 16;
4639     call &= 0xffff;
4640 
4641     switch (call) {
4642     case IPCOP_semop:
4643         ret = do_semtimedop(first, ptr, second, 0, false);
4644         break;
4645     case IPCOP_semtimedop:
4646     /*
4647      * The s390 sys_ipc variant has only five parameters instead of six
4648      * (as for default variant) and the only difference is the handling of
4649      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4650      * to a struct timespec where the generic variant uses fifth parameter.
4651      */
4652 #if defined(TARGET_S390X)
4653         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4654 #else
4655         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4656 #endif
4657         break;
4658 
4659     case IPCOP_semget:
4660         ret = get_errno(semget(first, second, third));
4661         break;
4662 
4663     case IPCOP_semctl: {
4664         /* The semun argument to semctl is passed by value, so dereference the
4665          * ptr argument. */
4666         abi_ulong atptr;
4667         get_user_ual(atptr, ptr);
4668         ret = do_semctl(first, second, third, atptr);
4669         break;
4670     }
4671 
4672     case IPCOP_msgget:
4673         ret = get_errno(msgget(first, second));
4674         break;
4675 
4676     case IPCOP_msgsnd:
4677         ret = do_msgsnd(first, ptr, second, third);
4678         break;
4679 
4680     case IPCOP_msgctl:
4681         ret = do_msgctl(first, second, ptr);
4682         break;
4683 
4684     case IPCOP_msgrcv:
4685         switch (version) {
4686         case 0:
4687             {
4688                 struct target_ipc_kludge {
4689                     abi_long msgp;
4690                     abi_long msgtyp;
4691                 } *tmp;
4692 
4693                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4694                     ret = -TARGET_EFAULT;
4695                     break;
4696                 }
4697 
4698                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4699 
4700                 unlock_user_struct(tmp, ptr, 0);
4701                 break;
4702             }
4703         default:
4704             ret = do_msgrcv(first, ptr, second, fifth, third);
4705         }
4706         break;
4707 
4708     case IPCOP_shmat:
4709         switch (version) {
4710         default:
4711         {
4712             abi_ulong raddr;
4713             raddr = do_shmat(cpu_env, first, ptr, second);
4714             if (is_error(raddr))
4715                 return get_errno(raddr);
4716             if (put_user_ual(raddr, third))
4717                 return -TARGET_EFAULT;
4718             break;
4719         }
4720         case 1:
4721             ret = -TARGET_EINVAL;
4722             break;
4723         }
4724 	break;
4725     case IPCOP_shmdt:
4726         ret = do_shmdt(ptr);
4727 	break;
4728 
4729     case IPCOP_shmget:
4730 	/* IPC_* flag values are the same on all linux platforms */
4731 	ret = get_errno(shmget(first, second, third));
4732 	break;
4733 
4734 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4735     case IPCOP_shmctl:
4736         ret = do_shmctl(first, second, ptr);
4737         break;
4738     default:
4739         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4740                       call, version);
4741 	ret = -TARGET_ENOSYS;
4742 	break;
4743     }
4744     return ret;
4745 }
4746 #endif
4747 
4748 /* kernel structure types definitions */
4749 
4750 #define STRUCT(name, ...) STRUCT_ ## name,
4751 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4752 enum {
4753 #include "syscall_types.h"
4754 STRUCT_MAX
4755 };
4756 #undef STRUCT
4757 #undef STRUCT_SPECIAL
4758 
4759 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4760 #define STRUCT_SPECIAL(name)
4761 #include "syscall_types.h"
4762 #undef STRUCT
4763 #undef STRUCT_SPECIAL
4764 
4765 #define MAX_STRUCT_SIZE 4096
4766 
4767 #ifdef CONFIG_FIEMAP
4768 /* So fiemap access checks don't overflow on 32 bit systems.
4769  * This is very slightly smaller than the limit imposed by
4770  * the underlying kernel.
4771  */
4772 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4773                             / sizeof(struct fiemap_extent))
4774 
4775 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4776                                        int fd, int cmd, abi_long arg)
4777 {
4778     /* The parameter for this ioctl is a struct fiemap followed
4779      * by an array of struct fiemap_extent whose size is set
4780      * in fiemap->fm_extent_count. The array is filled in by the
4781      * ioctl.
4782      */
4783     int target_size_in, target_size_out;
4784     struct fiemap *fm;
4785     const argtype *arg_type = ie->arg_type;
4786     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4787     void *argptr, *p;
4788     abi_long ret;
4789     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4790     uint32_t outbufsz;
4791     int free_fm = 0;
4792 
4793     assert(arg_type[0] == TYPE_PTR);
4794     assert(ie->access == IOC_RW);
4795     arg_type++;
4796     target_size_in = thunk_type_size(arg_type, 0);
4797     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4798     if (!argptr) {
4799         return -TARGET_EFAULT;
4800     }
4801     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4802     unlock_user(argptr, arg, 0);
4803     fm = (struct fiemap *)buf_temp;
4804     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4805         return -TARGET_EINVAL;
4806     }
4807 
4808     outbufsz = sizeof (*fm) +
4809         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4810 
4811     if (outbufsz > MAX_STRUCT_SIZE) {
4812         /* We can't fit all the extents into the fixed size buffer.
4813          * Allocate one that is large enough and use it instead.
4814          */
4815         fm = g_try_malloc(outbufsz);
4816         if (!fm) {
4817             return -TARGET_ENOMEM;
4818         }
4819         memcpy(fm, buf_temp, sizeof(struct fiemap));
4820         free_fm = 1;
4821     }
4822     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4823     if (!is_error(ret)) {
4824         target_size_out = target_size_in;
4825         /* An extent_count of 0 means we were only counting the extents
4826          * so there are no structs to copy
4827          */
4828         if (fm->fm_extent_count != 0) {
4829             target_size_out += fm->fm_mapped_extents * extent_size;
4830         }
4831         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4832         if (!argptr) {
4833             ret = -TARGET_EFAULT;
4834         } else {
4835             /* Convert the struct fiemap */
4836             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4837             if (fm->fm_extent_count != 0) {
4838                 p = argptr + target_size_in;
4839                 /* ...and then all the struct fiemap_extents */
4840                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4841                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4842                                   THUNK_TARGET);
4843                     p += extent_size;
4844                 }
4845             }
4846             unlock_user(argptr, arg, target_size_out);
4847         }
4848     }
4849     if (free_fm) {
4850         g_free(fm);
4851     }
4852     return ret;
4853 }
4854 #endif
4855 
4856 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4857                                 int fd, int cmd, abi_long arg)
4858 {
4859     const argtype *arg_type = ie->arg_type;
4860     int target_size;
4861     void *argptr;
4862     int ret;
4863     struct ifconf *host_ifconf;
4864     uint32_t outbufsz;
4865     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4866     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4867     int target_ifreq_size;
4868     int nb_ifreq;
4869     int free_buf = 0;
4870     int i;
4871     int target_ifc_len;
4872     abi_long target_ifc_buf;
4873     int host_ifc_len;
4874     char *host_ifc_buf;
4875 
4876     assert(arg_type[0] == TYPE_PTR);
4877     assert(ie->access == IOC_RW);
4878 
4879     arg_type++;
4880     target_size = thunk_type_size(arg_type, 0);
4881 
4882     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4883     if (!argptr)
4884         return -TARGET_EFAULT;
4885     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4886     unlock_user(argptr, arg, 0);
4887 
4888     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4889     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4890     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4891 
4892     if (target_ifc_buf != 0) {
4893         target_ifc_len = host_ifconf->ifc_len;
4894         nb_ifreq = target_ifc_len / target_ifreq_size;
4895         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4896 
4897         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4898         if (outbufsz > MAX_STRUCT_SIZE) {
4899             /*
4900              * We can't fit all the extents into the fixed size buffer.
4901              * Allocate one that is large enough and use it instead.
4902              */
4903             host_ifconf = g_try_malloc(outbufsz);
4904             if (!host_ifconf) {
4905                 return -TARGET_ENOMEM;
4906             }
4907             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4908             free_buf = 1;
4909         }
4910         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4911 
4912         host_ifconf->ifc_len = host_ifc_len;
4913     } else {
4914       host_ifc_buf = NULL;
4915     }
4916     host_ifconf->ifc_buf = host_ifc_buf;
4917 
4918     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4919     if (!is_error(ret)) {
4920 	/* convert host ifc_len to target ifc_len */
4921 
4922         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4923         target_ifc_len = nb_ifreq * target_ifreq_size;
4924         host_ifconf->ifc_len = target_ifc_len;
4925 
4926 	/* restore target ifc_buf */
4927 
4928         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4929 
4930 	/* copy struct ifconf to target user */
4931 
4932         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4933         if (!argptr)
4934             return -TARGET_EFAULT;
4935         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4936         unlock_user(argptr, arg, target_size);
4937 
4938         if (target_ifc_buf != 0) {
4939             /* copy ifreq[] to target user */
4940             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4941             for (i = 0; i < nb_ifreq ; i++) {
4942                 thunk_convert(argptr + i * target_ifreq_size,
4943                               host_ifc_buf + i * sizeof(struct ifreq),
4944                               ifreq_arg_type, THUNK_TARGET);
4945             }
4946             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4947         }
4948     }
4949 
4950     if (free_buf) {
4951         g_free(host_ifconf);
4952     }
4953 
4954     return ret;
4955 }
4956 
4957 #if defined(CONFIG_USBFS)
4958 #if HOST_LONG_BITS > 64
4959 #error USBDEVFS thunks do not support >64 bit hosts yet.
4960 #endif
4961 struct live_urb {
4962     uint64_t target_urb_adr;
4963     uint64_t target_buf_adr;
4964     char *target_buf_ptr;
4965     struct usbdevfs_urb host_urb;
4966 };
4967 
4968 static GHashTable *usbdevfs_urb_hashtable(void)
4969 {
4970     static GHashTable *urb_hashtable;
4971 
4972     if (!urb_hashtable) {
4973         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4974     }
4975     return urb_hashtable;
4976 }
4977 
4978 static void urb_hashtable_insert(struct live_urb *urb)
4979 {
4980     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4981     g_hash_table_insert(urb_hashtable, urb, urb);
4982 }
4983 
4984 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4985 {
4986     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4987     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4988 }
4989 
4990 static void urb_hashtable_remove(struct live_urb *urb)
4991 {
4992     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4993     g_hash_table_remove(urb_hashtable, urb);
4994 }
4995 
4996 static abi_long
4997 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4998                           int fd, int cmd, abi_long arg)
4999 {
5000     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5001     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5002     struct live_urb *lurb;
5003     void *argptr;
5004     uint64_t hurb;
5005     int target_size;
5006     uintptr_t target_urb_adr;
5007     abi_long ret;
5008 
5009     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5010 
5011     memset(buf_temp, 0, sizeof(uint64_t));
5012     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5013     if (is_error(ret)) {
5014         return ret;
5015     }
5016 
5017     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5018     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5019     if (!lurb->target_urb_adr) {
5020         return -TARGET_EFAULT;
5021     }
5022     urb_hashtable_remove(lurb);
5023     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5024         lurb->host_urb.buffer_length);
5025     lurb->target_buf_ptr = NULL;
5026 
5027     /* restore the guest buffer pointer */
5028     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5029 
5030     /* update the guest urb struct */
5031     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5032     if (!argptr) {
5033         g_free(lurb);
5034         return -TARGET_EFAULT;
5035     }
5036     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5037     unlock_user(argptr, lurb->target_urb_adr, target_size);
5038 
5039     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5040     /* write back the urb handle */
5041     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5042     if (!argptr) {
5043         g_free(lurb);
5044         return -TARGET_EFAULT;
5045     }
5046 
5047     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5048     target_urb_adr = lurb->target_urb_adr;
5049     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5050     unlock_user(argptr, arg, target_size);
5051 
5052     g_free(lurb);
5053     return ret;
5054 }
5055 
5056 static abi_long
5057 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5058                              uint8_t *buf_temp __attribute__((unused)),
5059                              int fd, int cmd, abi_long arg)
5060 {
5061     struct live_urb *lurb;
5062 
5063     /* map target address back to host URB with metadata. */
5064     lurb = urb_hashtable_lookup(arg);
5065     if (!lurb) {
5066         return -TARGET_EFAULT;
5067     }
5068     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5069 }
5070 
5071 static abi_long
5072 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5073                             int fd, int cmd, abi_long arg)
5074 {
5075     const argtype *arg_type = ie->arg_type;
5076     int target_size;
5077     abi_long ret;
5078     void *argptr;
5079     int rw_dir;
5080     struct live_urb *lurb;
5081 
5082     /*
5083      * each submitted URB needs to map to a unique ID for the
5084      * kernel, and that unique ID needs to be a pointer to
5085      * host memory.  hence, we need to malloc for each URB.
5086      * isochronous transfers have a variable length struct.
5087      */
5088     arg_type++;
5089     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5090 
5091     /* construct host copy of urb and metadata */
5092     lurb = g_try_new0(struct live_urb, 1);
5093     if (!lurb) {
5094         return -TARGET_ENOMEM;
5095     }
5096 
5097     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5098     if (!argptr) {
5099         g_free(lurb);
5100         return -TARGET_EFAULT;
5101     }
5102     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5103     unlock_user(argptr, arg, 0);
5104 
5105     lurb->target_urb_adr = arg;
5106     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5107 
5108     /* buffer space used depends on endpoint type so lock the entire buffer */
5109     /* control type urbs should check the buffer contents for true direction */
5110     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5111     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5112         lurb->host_urb.buffer_length, 1);
5113     if (lurb->target_buf_ptr == NULL) {
5114         g_free(lurb);
5115         return -TARGET_EFAULT;
5116     }
5117 
5118     /* update buffer pointer in host copy */
5119     lurb->host_urb.buffer = lurb->target_buf_ptr;
5120 
5121     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5122     if (is_error(ret)) {
5123         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5124         g_free(lurb);
5125     } else {
5126         urb_hashtable_insert(lurb);
5127     }
5128 
5129     return ret;
5130 }
5131 #endif /* CONFIG_USBFS */
5132 
5133 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5134                             int cmd, abi_long arg)
5135 {
5136     void *argptr;
5137     struct dm_ioctl *host_dm;
5138     abi_long guest_data;
5139     uint32_t guest_data_size;
5140     int target_size;
5141     const argtype *arg_type = ie->arg_type;
5142     abi_long ret;
5143     void *big_buf = NULL;
5144     char *host_data;
5145 
5146     arg_type++;
5147     target_size = thunk_type_size(arg_type, 0);
5148     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5149     if (!argptr) {
5150         ret = -TARGET_EFAULT;
5151         goto out;
5152     }
5153     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5154     unlock_user(argptr, arg, 0);
5155 
5156     /* buf_temp is too small, so fetch things into a bigger buffer */
5157     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5158     memcpy(big_buf, buf_temp, target_size);
5159     buf_temp = big_buf;
5160     host_dm = big_buf;
5161 
5162     guest_data = arg + host_dm->data_start;
5163     if ((guest_data - arg) < 0) {
5164         ret = -TARGET_EINVAL;
5165         goto out;
5166     }
5167     guest_data_size = host_dm->data_size - host_dm->data_start;
5168     host_data = (char*)host_dm + host_dm->data_start;
5169 
5170     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5171     if (!argptr) {
5172         ret = -TARGET_EFAULT;
5173         goto out;
5174     }
5175 
5176     switch (ie->host_cmd) {
5177     case DM_REMOVE_ALL:
5178     case DM_LIST_DEVICES:
5179     case DM_DEV_CREATE:
5180     case DM_DEV_REMOVE:
5181     case DM_DEV_SUSPEND:
5182     case DM_DEV_STATUS:
5183     case DM_DEV_WAIT:
5184     case DM_TABLE_STATUS:
5185     case DM_TABLE_CLEAR:
5186     case DM_TABLE_DEPS:
5187     case DM_LIST_VERSIONS:
5188         /* no input data */
5189         break;
5190     case DM_DEV_RENAME:
5191     case DM_DEV_SET_GEOMETRY:
5192         /* data contains only strings */
5193         memcpy(host_data, argptr, guest_data_size);
5194         break;
5195     case DM_TARGET_MSG:
5196         memcpy(host_data, argptr, guest_data_size);
5197         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5198         break;
5199     case DM_TABLE_LOAD:
5200     {
5201         void *gspec = argptr;
5202         void *cur_data = host_data;
5203         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5204         int spec_size = thunk_type_size(arg_type, 0);
5205         int i;
5206 
5207         for (i = 0; i < host_dm->target_count; i++) {
5208             struct dm_target_spec *spec = cur_data;
5209             uint32_t next;
5210             int slen;
5211 
5212             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5213             slen = strlen((char*)gspec + spec_size) + 1;
5214             next = spec->next;
5215             spec->next = sizeof(*spec) + slen;
5216             strcpy((char*)&spec[1], gspec + spec_size);
5217             gspec += next;
5218             cur_data += spec->next;
5219         }
5220         break;
5221     }
5222     default:
5223         ret = -TARGET_EINVAL;
5224         unlock_user(argptr, guest_data, 0);
5225         goto out;
5226     }
5227     unlock_user(argptr, guest_data, 0);
5228 
5229     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5230     if (!is_error(ret)) {
5231         guest_data = arg + host_dm->data_start;
5232         guest_data_size = host_dm->data_size - host_dm->data_start;
5233         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5234         switch (ie->host_cmd) {
5235         case DM_REMOVE_ALL:
5236         case DM_DEV_CREATE:
5237         case DM_DEV_REMOVE:
5238         case DM_DEV_RENAME:
5239         case DM_DEV_SUSPEND:
5240         case DM_DEV_STATUS:
5241         case DM_TABLE_LOAD:
5242         case DM_TABLE_CLEAR:
5243         case DM_TARGET_MSG:
5244         case DM_DEV_SET_GEOMETRY:
5245             /* no return data */
5246             break;
5247         case DM_LIST_DEVICES:
5248         {
5249             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5250             uint32_t remaining_data = guest_data_size;
5251             void *cur_data = argptr;
5252             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5253             int nl_size = 12; /* can't use thunk_size due to alignment */
5254 
5255             while (1) {
5256                 uint32_t next = nl->next;
5257                 if (next) {
5258                     nl->next = nl_size + (strlen(nl->name) + 1);
5259                 }
5260                 if (remaining_data < nl->next) {
5261                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5262                     break;
5263                 }
5264                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5265                 strcpy(cur_data + nl_size, nl->name);
5266                 cur_data += nl->next;
5267                 remaining_data -= nl->next;
5268                 if (!next) {
5269                     break;
5270                 }
5271                 nl = (void*)nl + next;
5272             }
5273             break;
5274         }
5275         case DM_DEV_WAIT:
5276         case DM_TABLE_STATUS:
5277         {
5278             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5279             void *cur_data = argptr;
5280             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5281             int spec_size = thunk_type_size(arg_type, 0);
5282             int i;
5283 
5284             for (i = 0; i < host_dm->target_count; i++) {
5285                 uint32_t next = spec->next;
5286                 int slen = strlen((char*)&spec[1]) + 1;
5287                 spec->next = (cur_data - argptr) + spec_size + slen;
5288                 if (guest_data_size < spec->next) {
5289                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5290                     break;
5291                 }
5292                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5293                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5294                 cur_data = argptr + spec->next;
5295                 spec = (void*)host_dm + host_dm->data_start + next;
5296             }
5297             break;
5298         }
5299         case DM_TABLE_DEPS:
5300         {
5301             void *hdata = (void*)host_dm + host_dm->data_start;
5302             int count = *(uint32_t*)hdata;
5303             uint64_t *hdev = hdata + 8;
5304             uint64_t *gdev = argptr + 8;
5305             int i;
5306 
5307             *(uint32_t*)argptr = tswap32(count);
5308             for (i = 0; i < count; i++) {
5309                 *gdev = tswap64(*hdev);
5310                 gdev++;
5311                 hdev++;
5312             }
5313             break;
5314         }
5315         case DM_LIST_VERSIONS:
5316         {
5317             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5318             uint32_t remaining_data = guest_data_size;
5319             void *cur_data = argptr;
5320             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5321             int vers_size = thunk_type_size(arg_type, 0);
5322 
5323             while (1) {
5324                 uint32_t next = vers->next;
5325                 if (next) {
5326                     vers->next = vers_size + (strlen(vers->name) + 1);
5327                 }
5328                 if (remaining_data < vers->next) {
5329                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5330                     break;
5331                 }
5332                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5333                 strcpy(cur_data + vers_size, vers->name);
5334                 cur_data += vers->next;
5335                 remaining_data -= vers->next;
5336                 if (!next) {
5337                     break;
5338                 }
5339                 vers = (void*)vers + next;
5340             }
5341             break;
5342         }
5343         default:
5344             unlock_user(argptr, guest_data, 0);
5345             ret = -TARGET_EINVAL;
5346             goto out;
5347         }
5348         unlock_user(argptr, guest_data, guest_data_size);
5349 
5350         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5351         if (!argptr) {
5352             ret = -TARGET_EFAULT;
5353             goto out;
5354         }
5355         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5356         unlock_user(argptr, arg, target_size);
5357     }
5358 out:
5359     g_free(big_buf);
5360     return ret;
5361 }
5362 
5363 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5364                                int cmd, abi_long arg)
5365 {
5366     void *argptr;
5367     int target_size;
5368     const argtype *arg_type = ie->arg_type;
5369     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5370     abi_long ret;
5371 
5372     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5373     struct blkpg_partition host_part;
5374 
5375     /* Read and convert blkpg */
5376     arg_type++;
5377     target_size = thunk_type_size(arg_type, 0);
5378     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5379     if (!argptr) {
5380         ret = -TARGET_EFAULT;
5381         goto out;
5382     }
5383     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5384     unlock_user(argptr, arg, 0);
5385 
5386     switch (host_blkpg->op) {
5387     case BLKPG_ADD_PARTITION:
5388     case BLKPG_DEL_PARTITION:
5389         /* payload is struct blkpg_partition */
5390         break;
5391     default:
5392         /* Unknown opcode */
5393         ret = -TARGET_EINVAL;
5394         goto out;
5395     }
5396 
5397     /* Read and convert blkpg->data */
5398     arg = (abi_long)(uintptr_t)host_blkpg->data;
5399     target_size = thunk_type_size(part_arg_type, 0);
5400     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401     if (!argptr) {
5402         ret = -TARGET_EFAULT;
5403         goto out;
5404     }
5405     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5406     unlock_user(argptr, arg, 0);
5407 
5408     /* Swizzle the data pointer to our local copy and call! */
5409     host_blkpg->data = &host_part;
5410     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5411 
5412 out:
5413     return ret;
5414 }
5415 
5416 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5417                                 int fd, int cmd, abi_long arg)
5418 {
5419     const argtype *arg_type = ie->arg_type;
5420     const StructEntry *se;
5421     const argtype *field_types;
5422     const int *dst_offsets, *src_offsets;
5423     int target_size;
5424     void *argptr;
5425     abi_ulong *target_rt_dev_ptr = NULL;
5426     unsigned long *host_rt_dev_ptr = NULL;
5427     abi_long ret;
5428     int i;
5429 
5430     assert(ie->access == IOC_W);
5431     assert(*arg_type == TYPE_PTR);
5432     arg_type++;
5433     assert(*arg_type == TYPE_STRUCT);
5434     target_size = thunk_type_size(arg_type, 0);
5435     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5436     if (!argptr) {
5437         return -TARGET_EFAULT;
5438     }
5439     arg_type++;
5440     assert(*arg_type == (int)STRUCT_rtentry);
5441     se = struct_entries + *arg_type++;
5442     assert(se->convert[0] == NULL);
5443     /* convert struct here to be able to catch rt_dev string */
5444     field_types = se->field_types;
5445     dst_offsets = se->field_offsets[THUNK_HOST];
5446     src_offsets = se->field_offsets[THUNK_TARGET];
5447     for (i = 0; i < se->nb_fields; i++) {
5448         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5449             assert(*field_types == TYPE_PTRVOID);
5450             target_rt_dev_ptr = argptr + src_offsets[i];
5451             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5452             if (*target_rt_dev_ptr != 0) {
5453                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5454                                                   tswapal(*target_rt_dev_ptr));
5455                 if (!*host_rt_dev_ptr) {
5456                     unlock_user(argptr, arg, 0);
5457                     return -TARGET_EFAULT;
5458                 }
5459             } else {
5460                 *host_rt_dev_ptr = 0;
5461             }
5462             field_types++;
5463             continue;
5464         }
5465         field_types = thunk_convert(buf_temp + dst_offsets[i],
5466                                     argptr + src_offsets[i],
5467                                     field_types, THUNK_HOST);
5468     }
5469     unlock_user(argptr, arg, 0);
5470 
5471     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5472 
5473     assert(host_rt_dev_ptr != NULL);
5474     assert(target_rt_dev_ptr != NULL);
5475     if (*host_rt_dev_ptr != 0) {
5476         unlock_user((void *)*host_rt_dev_ptr,
5477                     *target_rt_dev_ptr, 0);
5478     }
5479     return ret;
5480 }
5481 
5482 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5483                                      int fd, int cmd, abi_long arg)
5484 {
5485     int sig = target_to_host_signal(arg);
5486     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5487 }
5488 
5489 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5490                                     int fd, int cmd, abi_long arg)
5491 {
5492     struct timeval tv;
5493     abi_long ret;
5494 
5495     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5496     if (is_error(ret)) {
5497         return ret;
5498     }
5499 
5500     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5501         if (copy_to_user_timeval(arg, &tv)) {
5502             return -TARGET_EFAULT;
5503         }
5504     } else {
5505         if (copy_to_user_timeval64(arg, &tv)) {
5506             return -TARGET_EFAULT;
5507         }
5508     }
5509 
5510     return ret;
5511 }
5512 
5513 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5514                                       int fd, int cmd, abi_long arg)
5515 {
5516     struct timespec ts;
5517     abi_long ret;
5518 
5519     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5520     if (is_error(ret)) {
5521         return ret;
5522     }
5523 
5524     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5525         if (host_to_target_timespec(arg, &ts)) {
5526             return -TARGET_EFAULT;
5527         }
5528     } else{
5529         if (host_to_target_timespec64(arg, &ts)) {
5530             return -TARGET_EFAULT;
5531         }
5532     }
5533 
5534     return ret;
5535 }
5536 
5537 #ifdef TIOCGPTPEER
5538 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5539                                      int fd, int cmd, abi_long arg)
5540 {
5541     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5542     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5543 }
5544 #endif
5545 
5546 #ifdef HAVE_DRM_H
5547 
5548 static void unlock_drm_version(struct drm_version *host_ver,
5549                                struct target_drm_version *target_ver,
5550                                bool copy)
5551 {
5552     unlock_user(host_ver->name, target_ver->name,
5553                                 copy ? host_ver->name_len : 0);
5554     unlock_user(host_ver->date, target_ver->date,
5555                                 copy ? host_ver->date_len : 0);
5556     unlock_user(host_ver->desc, target_ver->desc,
5557                                 copy ? host_ver->desc_len : 0);
5558 }
5559 
5560 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5561                                           struct target_drm_version *target_ver)
5562 {
5563     memset(host_ver, 0, sizeof(*host_ver));
5564 
5565     __get_user(host_ver->name_len, &target_ver->name_len);
5566     if (host_ver->name_len) {
5567         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5568                                    target_ver->name_len, 0);
5569         if (!host_ver->name) {
5570             return -EFAULT;
5571         }
5572     }
5573 
5574     __get_user(host_ver->date_len, &target_ver->date_len);
5575     if (host_ver->date_len) {
5576         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5577                                    target_ver->date_len, 0);
5578         if (!host_ver->date) {
5579             goto err;
5580         }
5581     }
5582 
5583     __get_user(host_ver->desc_len, &target_ver->desc_len);
5584     if (host_ver->desc_len) {
5585         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5586                                    target_ver->desc_len, 0);
5587         if (!host_ver->desc) {
5588             goto err;
5589         }
5590     }
5591 
5592     return 0;
5593 err:
5594     unlock_drm_version(host_ver, target_ver, false);
5595     return -EFAULT;
5596 }
5597 
5598 static inline void host_to_target_drmversion(
5599                                           struct target_drm_version *target_ver,
5600                                           struct drm_version *host_ver)
5601 {
5602     __put_user(host_ver->version_major, &target_ver->version_major);
5603     __put_user(host_ver->version_minor, &target_ver->version_minor);
5604     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5605     __put_user(host_ver->name_len, &target_ver->name_len);
5606     __put_user(host_ver->date_len, &target_ver->date_len);
5607     __put_user(host_ver->desc_len, &target_ver->desc_len);
5608     unlock_drm_version(host_ver, target_ver, true);
5609 }
5610 
5611 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5612                              int fd, int cmd, abi_long arg)
5613 {
5614     struct drm_version *ver;
5615     struct target_drm_version *target_ver;
5616     abi_long ret;
5617 
5618     switch (ie->host_cmd) {
5619     case DRM_IOCTL_VERSION:
5620         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5621             return -TARGET_EFAULT;
5622         }
5623         ver = (struct drm_version *)buf_temp;
5624         ret = target_to_host_drmversion(ver, target_ver);
5625         if (!is_error(ret)) {
5626             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5627             if (is_error(ret)) {
5628                 unlock_drm_version(ver, target_ver, false);
5629             } else {
5630                 host_to_target_drmversion(target_ver, ver);
5631             }
5632         }
5633         unlock_user_struct(target_ver, arg, 0);
5634         return ret;
5635     }
5636     return -TARGET_ENOSYS;
5637 }
5638 
5639 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5640                                            struct drm_i915_getparam *gparam,
5641                                            int fd, abi_long arg)
5642 {
5643     abi_long ret;
5644     int value;
5645     struct target_drm_i915_getparam *target_gparam;
5646 
5647     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5648         return -TARGET_EFAULT;
5649     }
5650 
5651     __get_user(gparam->param, &target_gparam->param);
5652     gparam->value = &value;
5653     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5654     put_user_s32(value, target_gparam->value);
5655 
5656     unlock_user_struct(target_gparam, arg, 0);
5657     return ret;
5658 }
5659 
5660 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5661                                   int fd, int cmd, abi_long arg)
5662 {
5663     switch (ie->host_cmd) {
5664     case DRM_IOCTL_I915_GETPARAM:
5665         return do_ioctl_drm_i915_getparam(ie,
5666                                           (struct drm_i915_getparam *)buf_temp,
5667                                           fd, arg);
5668     default:
5669         return -TARGET_ENOSYS;
5670     }
5671 }
5672 
5673 #endif
5674 
5675 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5676                                         int fd, int cmd, abi_long arg)
5677 {
5678     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5679     struct tun_filter *target_filter;
5680     char *target_addr;
5681 
5682     assert(ie->access == IOC_W);
5683 
5684     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5685     if (!target_filter) {
5686         return -TARGET_EFAULT;
5687     }
5688     filter->flags = tswap16(target_filter->flags);
5689     filter->count = tswap16(target_filter->count);
5690     unlock_user(target_filter, arg, 0);
5691 
5692     if (filter->count) {
5693         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5694             MAX_STRUCT_SIZE) {
5695             return -TARGET_EFAULT;
5696         }
5697 
5698         target_addr = lock_user(VERIFY_READ,
5699                                 arg + offsetof(struct tun_filter, addr),
5700                                 filter->count * ETH_ALEN, 1);
5701         if (!target_addr) {
5702             return -TARGET_EFAULT;
5703         }
5704         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5705         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5706     }
5707 
5708     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5709 }
5710 
5711 IOCTLEntry ioctl_entries[] = {
5712 #define IOCTL(cmd, access, ...) \
5713     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5714 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5715     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5716 #define IOCTL_IGNORE(cmd) \
5717     { TARGET_ ## cmd, 0, #cmd },
5718 #include "ioctls.h"
5719     { 0, 0, },
5720 };
5721 
5722 /* ??? Implement proper locking for ioctls.  */
5723 /* do_ioctl() Must return target values and target errnos. */
5724 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5725 {
5726     const IOCTLEntry *ie;
5727     const argtype *arg_type;
5728     abi_long ret;
5729     uint8_t buf_temp[MAX_STRUCT_SIZE];
5730     int target_size;
5731     void *argptr;
5732 
5733     ie = ioctl_entries;
5734     for(;;) {
5735         if (ie->target_cmd == 0) {
5736             qemu_log_mask(
5737                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5738             return -TARGET_ENOSYS;
5739         }
5740         if (ie->target_cmd == cmd)
5741             break;
5742         ie++;
5743     }
5744     arg_type = ie->arg_type;
5745     if (ie->do_ioctl) {
5746         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5747     } else if (!ie->host_cmd) {
5748         /* Some architectures define BSD ioctls in their headers
5749            that are not implemented in Linux.  */
5750         return -TARGET_ENOSYS;
5751     }
5752 
5753     switch(arg_type[0]) {
5754     case TYPE_NULL:
5755         /* no argument */
5756         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5757         break;
5758     case TYPE_PTRVOID:
5759     case TYPE_INT:
5760     case TYPE_LONG:
5761     case TYPE_ULONG:
5762         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5763         break;
5764     case TYPE_PTR:
5765         arg_type++;
5766         target_size = thunk_type_size(arg_type, 0);
5767         switch(ie->access) {
5768         case IOC_R:
5769             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5770             if (!is_error(ret)) {
5771                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5772                 if (!argptr)
5773                     return -TARGET_EFAULT;
5774                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5775                 unlock_user(argptr, arg, target_size);
5776             }
5777             break;
5778         case IOC_W:
5779             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5780             if (!argptr)
5781                 return -TARGET_EFAULT;
5782             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5783             unlock_user(argptr, arg, 0);
5784             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5785             break;
5786         default:
5787         case IOC_RW:
5788             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5789             if (!argptr)
5790                 return -TARGET_EFAULT;
5791             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5792             unlock_user(argptr, arg, 0);
5793             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5794             if (!is_error(ret)) {
5795                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5796                 if (!argptr)
5797                     return -TARGET_EFAULT;
5798                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5799                 unlock_user(argptr, arg, target_size);
5800             }
5801             break;
5802         }
5803         break;
5804     default:
5805         qemu_log_mask(LOG_UNIMP,
5806                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5807                       (long)cmd, arg_type[0]);
5808         ret = -TARGET_ENOSYS;
5809         break;
5810     }
5811     return ret;
5812 }
5813 
5814 static const bitmask_transtbl iflag_tbl[] = {
5815         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5816         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5817         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5818         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5819         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5820         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5821         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5822         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5823         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5824         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5825         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5826         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5827         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5828         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5829         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5830         { 0, 0, 0, 0 }
5831 };
5832 
5833 static const bitmask_transtbl oflag_tbl[] = {
5834 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5835 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5836 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5837 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5838 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5839 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5840 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5841 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5842 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5843 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5844 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5845 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5846 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5847 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5848 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5849 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5850 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5851 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5852 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5853 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5854 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5855 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5856 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5857 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5858 	{ 0, 0, 0, 0 }
5859 };
5860 
5861 static const bitmask_transtbl cflag_tbl[] = {
5862 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5863 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5864 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5865 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5866 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5867 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5868 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5869 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5870 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5871 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5872 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5873 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5874 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5875 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5876 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5877 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5878 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5879 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5880 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5881 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5882 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5883 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5884 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5885 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5886 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5887 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5888 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5889 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5890 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5891 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5892 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5893 	{ 0, 0, 0, 0 }
5894 };
5895 
5896 static const bitmask_transtbl lflag_tbl[] = {
5897   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5898   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5899   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5900   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5901   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5902   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5903   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5904   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5905   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5906   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5907   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5908   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5909   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5910   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5911   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5912   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5913   { 0, 0, 0, 0 }
5914 };
5915 
5916 static void target_to_host_termios (void *dst, const void *src)
5917 {
5918     struct host_termios *host = dst;
5919     const struct target_termios *target = src;
5920 
5921     host->c_iflag =
5922         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5923     host->c_oflag =
5924         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5925     host->c_cflag =
5926         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5927     host->c_lflag =
5928         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5929     host->c_line = target->c_line;
5930 
5931     memset(host->c_cc, 0, sizeof(host->c_cc));
5932     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5933     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5934     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5935     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5936     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5937     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5938     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5939     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5940     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5941     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5942     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5943     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5944     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5945     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5946     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5947     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5948     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5949 }
5950 
5951 static void host_to_target_termios (void *dst, const void *src)
5952 {
5953     struct target_termios *target = dst;
5954     const struct host_termios *host = src;
5955 
5956     target->c_iflag =
5957         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5958     target->c_oflag =
5959         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5960     target->c_cflag =
5961         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5962     target->c_lflag =
5963         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5964     target->c_line = host->c_line;
5965 
5966     memset(target->c_cc, 0, sizeof(target->c_cc));
5967     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5968     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5969     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5970     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5971     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5972     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5973     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5974     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5975     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5976     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5977     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5978     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5979     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5980     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5981     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5982     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5983     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5984 }
5985 
5986 static const StructEntry struct_termios_def = {
5987     .convert = { host_to_target_termios, target_to_host_termios },
5988     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5989     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5990     .print = print_termios,
5991 };
5992 
5993 static const bitmask_transtbl mmap_flags_tbl[] = {
5994     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5995     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5996     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5997     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5998       MAP_ANONYMOUS, MAP_ANONYMOUS },
5999     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6000       MAP_GROWSDOWN, MAP_GROWSDOWN },
6001     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6002       MAP_DENYWRITE, MAP_DENYWRITE },
6003     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6004       MAP_EXECUTABLE, MAP_EXECUTABLE },
6005     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6006     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6007       MAP_NORESERVE, MAP_NORESERVE },
6008     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6009     /* MAP_STACK had been ignored by the kernel for quite some time.
6010        Recognize it for the target insofar as we do not want to pass
6011        it through to the host.  */
6012     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6013     { 0, 0, 0, 0 }
6014 };
6015 
6016 /*
6017  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6018  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6019  */
6020 #if defined(TARGET_I386)
6021 
6022 /* NOTE: there is really one LDT for all the threads */
6023 static uint8_t *ldt_table;
6024 
6025 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6026 {
6027     int size;
6028     void *p;
6029 
6030     if (!ldt_table)
6031         return 0;
6032     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6033     if (size > bytecount)
6034         size = bytecount;
6035     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6036     if (!p)
6037         return -TARGET_EFAULT;
6038     /* ??? Should this by byteswapped?  */
6039     memcpy(p, ldt_table, size);
6040     unlock_user(p, ptr, size);
6041     return size;
6042 }
6043 
6044 /* XXX: add locking support */
6045 static abi_long write_ldt(CPUX86State *env,
6046                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6047 {
6048     struct target_modify_ldt_ldt_s ldt_info;
6049     struct target_modify_ldt_ldt_s *target_ldt_info;
6050     int seg_32bit, contents, read_exec_only, limit_in_pages;
6051     int seg_not_present, useable, lm;
6052     uint32_t *lp, entry_1, entry_2;
6053 
6054     if (bytecount != sizeof(ldt_info))
6055         return -TARGET_EINVAL;
6056     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6057         return -TARGET_EFAULT;
6058     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6059     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6060     ldt_info.limit = tswap32(target_ldt_info->limit);
6061     ldt_info.flags = tswap32(target_ldt_info->flags);
6062     unlock_user_struct(target_ldt_info, ptr, 0);
6063 
6064     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6065         return -TARGET_EINVAL;
6066     seg_32bit = ldt_info.flags & 1;
6067     contents = (ldt_info.flags >> 1) & 3;
6068     read_exec_only = (ldt_info.flags >> 3) & 1;
6069     limit_in_pages = (ldt_info.flags >> 4) & 1;
6070     seg_not_present = (ldt_info.flags >> 5) & 1;
6071     useable = (ldt_info.flags >> 6) & 1;
6072 #ifdef TARGET_ABI32
6073     lm = 0;
6074 #else
6075     lm = (ldt_info.flags >> 7) & 1;
6076 #endif
6077     if (contents == 3) {
6078         if (oldmode)
6079             return -TARGET_EINVAL;
6080         if (seg_not_present == 0)
6081             return -TARGET_EINVAL;
6082     }
6083     /* allocate the LDT */
6084     if (!ldt_table) {
6085         env->ldt.base = target_mmap(0,
6086                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6087                                     PROT_READ|PROT_WRITE,
6088                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6089         if (env->ldt.base == -1)
6090             return -TARGET_ENOMEM;
6091         memset(g2h_untagged(env->ldt.base), 0,
6092                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6093         env->ldt.limit = 0xffff;
6094         ldt_table = g2h_untagged(env->ldt.base);
6095     }
6096 
6097     /* NOTE: same code as Linux kernel */
6098     /* Allow LDTs to be cleared by the user. */
6099     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6100         if (oldmode ||
6101             (contents == 0		&&
6102              read_exec_only == 1	&&
6103              seg_32bit == 0		&&
6104              limit_in_pages == 0	&&
6105              seg_not_present == 1	&&
6106              useable == 0 )) {
6107             entry_1 = 0;
6108             entry_2 = 0;
6109             goto install;
6110         }
6111     }
6112 
6113     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6114         (ldt_info.limit & 0x0ffff);
6115     entry_2 = (ldt_info.base_addr & 0xff000000) |
6116         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6117         (ldt_info.limit & 0xf0000) |
6118         ((read_exec_only ^ 1) << 9) |
6119         (contents << 10) |
6120         ((seg_not_present ^ 1) << 15) |
6121         (seg_32bit << 22) |
6122         (limit_in_pages << 23) |
6123         (lm << 21) |
6124         0x7000;
6125     if (!oldmode)
6126         entry_2 |= (useable << 20);
6127 
6128     /* Install the new entry ...  */
6129 install:
6130     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6131     lp[0] = tswap32(entry_1);
6132     lp[1] = tswap32(entry_2);
6133     return 0;
6134 }
6135 
6136 /* specific and weird i386 syscalls */
6137 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6138                               unsigned long bytecount)
6139 {
6140     abi_long ret;
6141 
6142     switch (func) {
6143     case 0:
6144         ret = read_ldt(ptr, bytecount);
6145         break;
6146     case 1:
6147         ret = write_ldt(env, ptr, bytecount, 1);
6148         break;
6149     case 0x11:
6150         ret = write_ldt(env, ptr, bytecount, 0);
6151         break;
6152     default:
6153         ret = -TARGET_ENOSYS;
6154         break;
6155     }
6156     return ret;
6157 }
6158 
6159 #if defined(TARGET_ABI32)
6160 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6161 {
6162     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6163     struct target_modify_ldt_ldt_s ldt_info;
6164     struct target_modify_ldt_ldt_s *target_ldt_info;
6165     int seg_32bit, contents, read_exec_only, limit_in_pages;
6166     int seg_not_present, useable, lm;
6167     uint32_t *lp, entry_1, entry_2;
6168     int i;
6169 
6170     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6171     if (!target_ldt_info)
6172         return -TARGET_EFAULT;
6173     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6174     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6175     ldt_info.limit = tswap32(target_ldt_info->limit);
6176     ldt_info.flags = tswap32(target_ldt_info->flags);
6177     if (ldt_info.entry_number == -1) {
6178         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6179             if (gdt_table[i] == 0) {
6180                 ldt_info.entry_number = i;
6181                 target_ldt_info->entry_number = tswap32(i);
6182                 break;
6183             }
6184         }
6185     }
6186     unlock_user_struct(target_ldt_info, ptr, 1);
6187 
6188     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6189         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6190            return -TARGET_EINVAL;
6191     seg_32bit = ldt_info.flags & 1;
6192     contents = (ldt_info.flags >> 1) & 3;
6193     read_exec_only = (ldt_info.flags >> 3) & 1;
6194     limit_in_pages = (ldt_info.flags >> 4) & 1;
6195     seg_not_present = (ldt_info.flags >> 5) & 1;
6196     useable = (ldt_info.flags >> 6) & 1;
6197 #ifdef TARGET_ABI32
6198     lm = 0;
6199 #else
6200     lm = (ldt_info.flags >> 7) & 1;
6201 #endif
6202 
6203     if (contents == 3) {
6204         if (seg_not_present == 0)
6205             return -TARGET_EINVAL;
6206     }
6207 
6208     /* NOTE: same code as Linux kernel */
6209     /* Allow LDTs to be cleared by the user. */
6210     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6211         if ((contents == 0             &&
6212              read_exec_only == 1       &&
6213              seg_32bit == 0            &&
6214              limit_in_pages == 0       &&
6215              seg_not_present == 1      &&
6216              useable == 0 )) {
6217             entry_1 = 0;
6218             entry_2 = 0;
6219             goto install;
6220         }
6221     }
6222 
6223     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6224         (ldt_info.limit & 0x0ffff);
6225     entry_2 = (ldt_info.base_addr & 0xff000000) |
6226         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6227         (ldt_info.limit & 0xf0000) |
6228         ((read_exec_only ^ 1) << 9) |
6229         (contents << 10) |
6230         ((seg_not_present ^ 1) << 15) |
6231         (seg_32bit << 22) |
6232         (limit_in_pages << 23) |
6233         (useable << 20) |
6234         (lm << 21) |
6235         0x7000;
6236 
6237     /* Install the new entry ...  */
6238 install:
6239     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6240     lp[0] = tswap32(entry_1);
6241     lp[1] = tswap32(entry_2);
6242     return 0;
6243 }
6244 
6245 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6246 {
6247     struct target_modify_ldt_ldt_s *target_ldt_info;
6248     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6249     uint32_t base_addr, limit, flags;
6250     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6251     int seg_not_present, useable, lm;
6252     uint32_t *lp, entry_1, entry_2;
6253 
6254     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6255     if (!target_ldt_info)
6256         return -TARGET_EFAULT;
6257     idx = tswap32(target_ldt_info->entry_number);
6258     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6259         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6260         unlock_user_struct(target_ldt_info, ptr, 1);
6261         return -TARGET_EINVAL;
6262     }
6263     lp = (uint32_t *)(gdt_table + idx);
6264     entry_1 = tswap32(lp[0]);
6265     entry_2 = tswap32(lp[1]);
6266 
6267     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6268     contents = (entry_2 >> 10) & 3;
6269     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6270     seg_32bit = (entry_2 >> 22) & 1;
6271     limit_in_pages = (entry_2 >> 23) & 1;
6272     useable = (entry_2 >> 20) & 1;
6273 #ifdef TARGET_ABI32
6274     lm = 0;
6275 #else
6276     lm = (entry_2 >> 21) & 1;
6277 #endif
6278     flags = (seg_32bit << 0) | (contents << 1) |
6279         (read_exec_only << 3) | (limit_in_pages << 4) |
6280         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6281     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6282     base_addr = (entry_1 >> 16) |
6283         (entry_2 & 0xff000000) |
6284         ((entry_2 & 0xff) << 16);
6285     target_ldt_info->base_addr = tswapal(base_addr);
6286     target_ldt_info->limit = tswap32(limit);
6287     target_ldt_info->flags = tswap32(flags);
6288     unlock_user_struct(target_ldt_info, ptr, 1);
6289     return 0;
6290 }
6291 
6292 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6293 {
6294     return -TARGET_ENOSYS;
6295 }
6296 #else
6297 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6298 {
6299     abi_long ret = 0;
6300     abi_ulong val;
6301     int idx;
6302 
6303     switch(code) {
6304     case TARGET_ARCH_SET_GS:
6305     case TARGET_ARCH_SET_FS:
6306         if (code == TARGET_ARCH_SET_GS)
6307             idx = R_GS;
6308         else
6309             idx = R_FS;
6310         cpu_x86_load_seg(env, idx, 0);
6311         env->segs[idx].base = addr;
6312         break;
6313     case TARGET_ARCH_GET_GS:
6314     case TARGET_ARCH_GET_FS:
6315         if (code == TARGET_ARCH_GET_GS)
6316             idx = R_GS;
6317         else
6318             idx = R_FS;
6319         val = env->segs[idx].base;
6320         if (put_user(val, addr, abi_ulong))
6321             ret = -TARGET_EFAULT;
6322         break;
6323     default:
6324         ret = -TARGET_EINVAL;
6325         break;
6326     }
6327     return ret;
6328 }
6329 #endif /* defined(TARGET_ABI32 */
6330 #endif /* defined(TARGET_I386) */
6331 
6332 /*
6333  * These constants are generic.  Supply any that are missing from the host.
6334  */
6335 #ifndef PR_SET_NAME
6336 # define PR_SET_NAME    15
6337 # define PR_GET_NAME    16
6338 #endif
6339 #ifndef PR_SET_FP_MODE
6340 # define PR_SET_FP_MODE 45
6341 # define PR_GET_FP_MODE 46
6342 # define PR_FP_MODE_FR   (1 << 0)
6343 # define PR_FP_MODE_FRE  (1 << 1)
6344 #endif
6345 #ifndef PR_SVE_SET_VL
6346 # define PR_SVE_SET_VL  50
6347 # define PR_SVE_GET_VL  51
6348 # define PR_SVE_VL_LEN_MASK  0xffff
6349 # define PR_SVE_VL_INHERIT   (1 << 17)
6350 #endif
6351 #ifndef PR_PAC_RESET_KEYS
6352 # define PR_PAC_RESET_KEYS  54
6353 # define PR_PAC_APIAKEY   (1 << 0)
6354 # define PR_PAC_APIBKEY   (1 << 1)
6355 # define PR_PAC_APDAKEY   (1 << 2)
6356 # define PR_PAC_APDBKEY   (1 << 3)
6357 # define PR_PAC_APGAKEY   (1 << 4)
6358 #endif
6359 #ifndef PR_SET_TAGGED_ADDR_CTRL
6360 # define PR_SET_TAGGED_ADDR_CTRL 55
6361 # define PR_GET_TAGGED_ADDR_CTRL 56
6362 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6363 #endif
6364 #ifndef PR_MTE_TCF_SHIFT
6365 # define PR_MTE_TCF_SHIFT       1
6366 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6367 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6368 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6369 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TAG_SHIFT       3
6371 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6372 #endif
6373 #ifndef PR_SET_IO_FLUSHER
6374 # define PR_SET_IO_FLUSHER 57
6375 # define PR_GET_IO_FLUSHER 58
6376 #endif
6377 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6378 # define PR_SET_SYSCALL_USER_DISPATCH 59
6379 #endif
6380 #ifndef PR_SME_SET_VL
6381 # define PR_SME_SET_VL  63
6382 # define PR_SME_GET_VL  64
6383 # define PR_SME_VL_LEN_MASK  0xffff
6384 # define PR_SME_VL_INHERIT   (1 << 17)
6385 #endif
6386 
6387 #include "target_prctl.h"
6388 
6389 static abi_long do_prctl_inval0(CPUArchState *env)
6390 {
6391     return -TARGET_EINVAL;
6392 }
6393 
6394 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6395 {
6396     return -TARGET_EINVAL;
6397 }
6398 
6399 #ifndef do_prctl_get_fp_mode
6400 #define do_prctl_get_fp_mode do_prctl_inval0
6401 #endif
6402 #ifndef do_prctl_set_fp_mode
6403 #define do_prctl_set_fp_mode do_prctl_inval1
6404 #endif
6405 #ifndef do_prctl_sve_get_vl
6406 #define do_prctl_sve_get_vl do_prctl_inval0
6407 #endif
6408 #ifndef do_prctl_sve_set_vl
6409 #define do_prctl_sve_set_vl do_prctl_inval1
6410 #endif
6411 #ifndef do_prctl_reset_keys
6412 #define do_prctl_reset_keys do_prctl_inval1
6413 #endif
6414 #ifndef do_prctl_set_tagged_addr_ctrl
6415 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6416 #endif
6417 #ifndef do_prctl_get_tagged_addr_ctrl
6418 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6419 #endif
6420 #ifndef do_prctl_get_unalign
6421 #define do_prctl_get_unalign do_prctl_inval1
6422 #endif
6423 #ifndef do_prctl_set_unalign
6424 #define do_prctl_set_unalign do_prctl_inval1
6425 #endif
6426 #ifndef do_prctl_sme_get_vl
6427 #define do_prctl_sme_get_vl do_prctl_inval0
6428 #endif
6429 #ifndef do_prctl_sme_set_vl
6430 #define do_prctl_sme_set_vl do_prctl_inval1
6431 #endif
6432 
6433 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6434                          abi_long arg3, abi_long arg4, abi_long arg5)
6435 {
6436     abi_long ret;
6437 
6438     switch (option) {
6439     case PR_GET_PDEATHSIG:
6440         {
6441             int deathsig;
6442             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6443                                   arg3, arg4, arg5));
6444             if (!is_error(ret) &&
6445                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6446                 return -TARGET_EFAULT;
6447             }
6448             return ret;
6449         }
6450     case PR_SET_PDEATHSIG:
6451         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6452                                arg3, arg4, arg5));
6453     case PR_GET_NAME:
6454         {
6455             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6456             if (!name) {
6457                 return -TARGET_EFAULT;
6458             }
6459             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6460                                   arg3, arg4, arg5));
6461             unlock_user(name, arg2, 16);
6462             return ret;
6463         }
6464     case PR_SET_NAME:
6465         {
6466             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6467             if (!name) {
6468                 return -TARGET_EFAULT;
6469             }
6470             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6471                                   arg3, arg4, arg5));
6472             unlock_user(name, arg2, 0);
6473             return ret;
6474         }
6475     case PR_GET_FP_MODE:
6476         return do_prctl_get_fp_mode(env);
6477     case PR_SET_FP_MODE:
6478         return do_prctl_set_fp_mode(env, arg2);
6479     case PR_SVE_GET_VL:
6480         return do_prctl_sve_get_vl(env);
6481     case PR_SVE_SET_VL:
6482         return do_prctl_sve_set_vl(env, arg2);
6483     case PR_SME_GET_VL:
6484         return do_prctl_sme_get_vl(env);
6485     case PR_SME_SET_VL:
6486         return do_prctl_sme_set_vl(env, arg2);
6487     case PR_PAC_RESET_KEYS:
6488         if (arg3 || arg4 || arg5) {
6489             return -TARGET_EINVAL;
6490         }
6491         return do_prctl_reset_keys(env, arg2);
6492     case PR_SET_TAGGED_ADDR_CTRL:
6493         if (arg3 || arg4 || arg5) {
6494             return -TARGET_EINVAL;
6495         }
6496         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6497     case PR_GET_TAGGED_ADDR_CTRL:
6498         if (arg2 || arg3 || arg4 || arg5) {
6499             return -TARGET_EINVAL;
6500         }
6501         return do_prctl_get_tagged_addr_ctrl(env);
6502 
6503     case PR_GET_UNALIGN:
6504         return do_prctl_get_unalign(env, arg2);
6505     case PR_SET_UNALIGN:
6506         return do_prctl_set_unalign(env, arg2);
6507 
6508     case PR_CAP_AMBIENT:
6509     case PR_CAPBSET_READ:
6510     case PR_CAPBSET_DROP:
6511     case PR_GET_DUMPABLE:
6512     case PR_SET_DUMPABLE:
6513     case PR_GET_KEEPCAPS:
6514     case PR_SET_KEEPCAPS:
6515     case PR_GET_SECUREBITS:
6516     case PR_SET_SECUREBITS:
6517     case PR_GET_TIMING:
6518     case PR_SET_TIMING:
6519     case PR_GET_TIMERSLACK:
6520     case PR_SET_TIMERSLACK:
6521     case PR_MCE_KILL:
6522     case PR_MCE_KILL_GET:
6523     case PR_GET_NO_NEW_PRIVS:
6524     case PR_SET_NO_NEW_PRIVS:
6525     case PR_GET_IO_FLUSHER:
6526     case PR_SET_IO_FLUSHER:
6527         /* Some prctl options have no pointer arguments and we can pass on. */
6528         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6529 
6530     case PR_GET_CHILD_SUBREAPER:
6531     case PR_SET_CHILD_SUBREAPER:
6532     case PR_GET_SPECULATION_CTRL:
6533     case PR_SET_SPECULATION_CTRL:
6534     case PR_GET_TID_ADDRESS:
6535         /* TODO */
6536         return -TARGET_EINVAL;
6537 
6538     case PR_GET_FPEXC:
6539     case PR_SET_FPEXC:
6540         /* Was used for SPE on PowerPC. */
6541         return -TARGET_EINVAL;
6542 
6543     case PR_GET_ENDIAN:
6544     case PR_SET_ENDIAN:
6545     case PR_GET_FPEMU:
6546     case PR_SET_FPEMU:
6547     case PR_SET_MM:
6548     case PR_GET_SECCOMP:
6549     case PR_SET_SECCOMP:
6550     case PR_SET_SYSCALL_USER_DISPATCH:
6551     case PR_GET_THP_DISABLE:
6552     case PR_SET_THP_DISABLE:
6553     case PR_GET_TSC:
6554     case PR_SET_TSC:
6555         /* Disable to prevent the target disabling stuff we need. */
6556         return -TARGET_EINVAL;
6557 
6558     default:
6559         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6560                       option);
6561         return -TARGET_EINVAL;
6562     }
6563 }
6564 
6565 #define NEW_STACK_SIZE 0x40000
6566 
6567 
6568 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6569 typedef struct {
6570     CPUArchState *env;
6571     pthread_mutex_t mutex;
6572     pthread_cond_t cond;
6573     pthread_t thread;
6574     uint32_t tid;
6575     abi_ulong child_tidptr;
6576     abi_ulong parent_tidptr;
6577     sigset_t sigmask;
6578 } new_thread_info;
6579 
6580 static void *clone_func(void *arg)
6581 {
6582     new_thread_info *info = arg;
6583     CPUArchState *env;
6584     CPUState *cpu;
6585     TaskState *ts;
6586 
6587     rcu_register_thread();
6588     tcg_register_thread();
6589     env = info->env;
6590     cpu = env_cpu(env);
6591     thread_cpu = cpu;
6592     ts = (TaskState *)cpu->opaque;
6593     info->tid = sys_gettid();
6594     task_settid(ts);
6595     if (info->child_tidptr)
6596         put_user_u32(info->tid, info->child_tidptr);
6597     if (info->parent_tidptr)
6598         put_user_u32(info->tid, info->parent_tidptr);
6599     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6600     /* Enable signals.  */
6601     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6602     /* Signal to the parent that we're ready.  */
6603     pthread_mutex_lock(&info->mutex);
6604     pthread_cond_broadcast(&info->cond);
6605     pthread_mutex_unlock(&info->mutex);
6606     /* Wait until the parent has finished initializing the tls state.  */
6607     pthread_mutex_lock(&clone_lock);
6608     pthread_mutex_unlock(&clone_lock);
6609     cpu_loop(env);
6610     /* never exits */
6611     return NULL;
6612 }
6613 
6614 /* do_fork() Must return host values and target errnos (unlike most
6615    do_*() functions). */
6616 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6617                    abi_ulong parent_tidptr, target_ulong newtls,
6618                    abi_ulong child_tidptr)
6619 {
6620     CPUState *cpu = env_cpu(env);
6621     int ret;
6622     TaskState *ts;
6623     CPUState *new_cpu;
6624     CPUArchState *new_env;
6625     sigset_t sigmask;
6626 
6627     flags &= ~CLONE_IGNORED_FLAGS;
6628 
6629     /* Emulate vfork() with fork() */
6630     if (flags & CLONE_VFORK)
6631         flags &= ~(CLONE_VFORK | CLONE_VM);
6632 
6633     if (flags & CLONE_VM) {
6634         TaskState *parent_ts = (TaskState *)cpu->opaque;
6635         new_thread_info info;
6636         pthread_attr_t attr;
6637 
6638         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6639             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6640             return -TARGET_EINVAL;
6641         }
6642 
6643         ts = g_new0(TaskState, 1);
6644         init_task_state(ts);
6645 
6646         /* Grab a mutex so that thread setup appears atomic.  */
6647         pthread_mutex_lock(&clone_lock);
6648 
6649         /*
6650          * If this is our first additional thread, we need to ensure we
6651          * generate code for parallel execution and flush old translations.
6652          * Do this now so that the copy gets CF_PARALLEL too.
6653          */
6654         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6655             cpu->tcg_cflags |= CF_PARALLEL;
6656             tb_flush(cpu);
6657         }
6658 
6659         /* we create a new CPU instance. */
6660         new_env = cpu_copy(env);
6661         /* Init regs that differ from the parent.  */
6662         cpu_clone_regs_child(new_env, newsp, flags);
6663         cpu_clone_regs_parent(env, flags);
6664         new_cpu = env_cpu(new_env);
6665         new_cpu->opaque = ts;
6666         ts->bprm = parent_ts->bprm;
6667         ts->info = parent_ts->info;
6668         ts->signal_mask = parent_ts->signal_mask;
6669 
6670         if (flags & CLONE_CHILD_CLEARTID) {
6671             ts->child_tidptr = child_tidptr;
6672         }
6673 
6674         if (flags & CLONE_SETTLS) {
6675             cpu_set_tls (new_env, newtls);
6676         }
6677 
6678         memset(&info, 0, sizeof(info));
6679         pthread_mutex_init(&info.mutex, NULL);
6680         pthread_mutex_lock(&info.mutex);
6681         pthread_cond_init(&info.cond, NULL);
6682         info.env = new_env;
6683         if (flags & CLONE_CHILD_SETTID) {
6684             info.child_tidptr = child_tidptr;
6685         }
6686         if (flags & CLONE_PARENT_SETTID) {
6687             info.parent_tidptr = parent_tidptr;
6688         }
6689 
6690         ret = pthread_attr_init(&attr);
6691         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6692         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6693         /* It is not safe to deliver signals until the child has finished
6694            initializing, so temporarily block all signals.  */
6695         sigfillset(&sigmask);
6696         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6697         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6698 
6699         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6700         /* TODO: Free new CPU state if thread creation failed.  */
6701 
6702         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6703         pthread_attr_destroy(&attr);
6704         if (ret == 0) {
6705             /* Wait for the child to initialize.  */
6706             pthread_cond_wait(&info.cond, &info.mutex);
6707             ret = info.tid;
6708         } else {
6709             ret = -1;
6710         }
6711         pthread_mutex_unlock(&info.mutex);
6712         pthread_cond_destroy(&info.cond);
6713         pthread_mutex_destroy(&info.mutex);
6714         pthread_mutex_unlock(&clone_lock);
6715     } else {
6716         /* if no CLONE_VM, we consider it is a fork */
6717         if (flags & CLONE_INVALID_FORK_FLAGS) {
6718             return -TARGET_EINVAL;
6719         }
6720 
6721         /* We can't support custom termination signals */
6722         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6723             return -TARGET_EINVAL;
6724         }
6725 
6726         if (block_signals()) {
6727             return -QEMU_ERESTARTSYS;
6728         }
6729 
6730         fork_start();
6731         ret = fork();
6732         if (ret == 0) {
6733             /* Child Process.  */
6734             cpu_clone_regs_child(env, newsp, flags);
6735             fork_end(1);
6736             /* There is a race condition here.  The parent process could
6737                theoretically read the TID in the child process before the child
6738                tid is set.  This would require using either ptrace
6739                (not implemented) or having *_tidptr to point at a shared memory
6740                mapping.  We can't repeat the spinlock hack used above because
6741                the child process gets its own copy of the lock.  */
6742             if (flags & CLONE_CHILD_SETTID)
6743                 put_user_u32(sys_gettid(), child_tidptr);
6744             if (flags & CLONE_PARENT_SETTID)
6745                 put_user_u32(sys_gettid(), parent_tidptr);
6746             ts = (TaskState *)cpu->opaque;
6747             if (flags & CLONE_SETTLS)
6748                 cpu_set_tls (env, newtls);
6749             if (flags & CLONE_CHILD_CLEARTID)
6750                 ts->child_tidptr = child_tidptr;
6751         } else {
6752             cpu_clone_regs_parent(env, flags);
6753             fork_end(0);
6754         }
6755         g_assert(!cpu_in_exclusive_context(cpu));
6756     }
6757     return ret;
6758 }
6759 
6760 /* warning : doesn't handle linux specific flags... */
6761 static int target_to_host_fcntl_cmd(int cmd)
6762 {
6763     int ret;
6764 
6765     switch(cmd) {
6766     case TARGET_F_DUPFD:
6767     case TARGET_F_GETFD:
6768     case TARGET_F_SETFD:
6769     case TARGET_F_GETFL:
6770     case TARGET_F_SETFL:
6771     case TARGET_F_OFD_GETLK:
6772     case TARGET_F_OFD_SETLK:
6773     case TARGET_F_OFD_SETLKW:
6774         ret = cmd;
6775         break;
6776     case TARGET_F_GETLK:
6777         ret = F_GETLK64;
6778         break;
6779     case TARGET_F_SETLK:
6780         ret = F_SETLK64;
6781         break;
6782     case TARGET_F_SETLKW:
6783         ret = F_SETLKW64;
6784         break;
6785     case TARGET_F_GETOWN:
6786         ret = F_GETOWN;
6787         break;
6788     case TARGET_F_SETOWN:
6789         ret = F_SETOWN;
6790         break;
6791     case TARGET_F_GETSIG:
6792         ret = F_GETSIG;
6793         break;
6794     case TARGET_F_SETSIG:
6795         ret = F_SETSIG;
6796         break;
6797 #if TARGET_ABI_BITS == 32
6798     case TARGET_F_GETLK64:
6799         ret = F_GETLK64;
6800         break;
6801     case TARGET_F_SETLK64:
6802         ret = F_SETLK64;
6803         break;
6804     case TARGET_F_SETLKW64:
6805         ret = F_SETLKW64;
6806         break;
6807 #endif
6808     case TARGET_F_SETLEASE:
6809         ret = F_SETLEASE;
6810         break;
6811     case TARGET_F_GETLEASE:
6812         ret = F_GETLEASE;
6813         break;
6814 #ifdef F_DUPFD_CLOEXEC
6815     case TARGET_F_DUPFD_CLOEXEC:
6816         ret = F_DUPFD_CLOEXEC;
6817         break;
6818 #endif
6819     case TARGET_F_NOTIFY:
6820         ret = F_NOTIFY;
6821         break;
6822 #ifdef F_GETOWN_EX
6823     case TARGET_F_GETOWN_EX:
6824         ret = F_GETOWN_EX;
6825         break;
6826 #endif
6827 #ifdef F_SETOWN_EX
6828     case TARGET_F_SETOWN_EX:
6829         ret = F_SETOWN_EX;
6830         break;
6831 #endif
6832 #ifdef F_SETPIPE_SZ
6833     case TARGET_F_SETPIPE_SZ:
6834         ret = F_SETPIPE_SZ;
6835         break;
6836     case TARGET_F_GETPIPE_SZ:
6837         ret = F_GETPIPE_SZ;
6838         break;
6839 #endif
6840 #ifdef F_ADD_SEALS
6841     case TARGET_F_ADD_SEALS:
6842         ret = F_ADD_SEALS;
6843         break;
6844     case TARGET_F_GET_SEALS:
6845         ret = F_GET_SEALS;
6846         break;
6847 #endif
6848     default:
6849         ret = -TARGET_EINVAL;
6850         break;
6851     }
6852 
6853 #if defined(__powerpc64__)
6854     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6855      * is not supported by kernel. The glibc fcntl call actually adjusts
6856      * them to 5, 6 and 7 before making the syscall(). Since we make the
6857      * syscall directly, adjust to what is supported by the kernel.
6858      */
6859     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6860         ret -= F_GETLK64 - 5;
6861     }
6862 #endif
6863 
6864     return ret;
6865 }
6866 
6867 #define FLOCK_TRANSTBL \
6868     switch (type) { \
6869     TRANSTBL_CONVERT(F_RDLCK); \
6870     TRANSTBL_CONVERT(F_WRLCK); \
6871     TRANSTBL_CONVERT(F_UNLCK); \
6872     }
6873 
6874 static int target_to_host_flock(int type)
6875 {
6876 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6877     FLOCK_TRANSTBL
6878 #undef  TRANSTBL_CONVERT
6879     return -TARGET_EINVAL;
6880 }
6881 
6882 static int host_to_target_flock(int type)
6883 {
6884 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6885     FLOCK_TRANSTBL
6886 #undef  TRANSTBL_CONVERT
6887     /* if we don't know how to convert the value coming
6888      * from the host we copy to the target field as-is
6889      */
6890     return type;
6891 }
6892 
6893 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6894                                             abi_ulong target_flock_addr)
6895 {
6896     struct target_flock *target_fl;
6897     int l_type;
6898 
6899     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6900         return -TARGET_EFAULT;
6901     }
6902 
6903     __get_user(l_type, &target_fl->l_type);
6904     l_type = target_to_host_flock(l_type);
6905     if (l_type < 0) {
6906         return l_type;
6907     }
6908     fl->l_type = l_type;
6909     __get_user(fl->l_whence, &target_fl->l_whence);
6910     __get_user(fl->l_start, &target_fl->l_start);
6911     __get_user(fl->l_len, &target_fl->l_len);
6912     __get_user(fl->l_pid, &target_fl->l_pid);
6913     unlock_user_struct(target_fl, target_flock_addr, 0);
6914     return 0;
6915 }
6916 
6917 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6918                                           const struct flock64 *fl)
6919 {
6920     struct target_flock *target_fl;
6921     short l_type;
6922 
6923     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6924         return -TARGET_EFAULT;
6925     }
6926 
6927     l_type = host_to_target_flock(fl->l_type);
6928     __put_user(l_type, &target_fl->l_type);
6929     __put_user(fl->l_whence, &target_fl->l_whence);
6930     __put_user(fl->l_start, &target_fl->l_start);
6931     __put_user(fl->l_len, &target_fl->l_len);
6932     __put_user(fl->l_pid, &target_fl->l_pid);
6933     unlock_user_struct(target_fl, target_flock_addr, 1);
6934     return 0;
6935 }
6936 
6937 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6938 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6939 
6940 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6941 struct target_oabi_flock64 {
6942     abi_short l_type;
6943     abi_short l_whence;
6944     abi_llong l_start;
6945     abi_llong l_len;
6946     abi_int   l_pid;
6947 } QEMU_PACKED;
6948 
6949 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6950                                                    abi_ulong target_flock_addr)
6951 {
6952     struct target_oabi_flock64 *target_fl;
6953     int l_type;
6954 
6955     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6956         return -TARGET_EFAULT;
6957     }
6958 
6959     __get_user(l_type, &target_fl->l_type);
6960     l_type = target_to_host_flock(l_type);
6961     if (l_type < 0) {
6962         return l_type;
6963     }
6964     fl->l_type = l_type;
6965     __get_user(fl->l_whence, &target_fl->l_whence);
6966     __get_user(fl->l_start, &target_fl->l_start);
6967     __get_user(fl->l_len, &target_fl->l_len);
6968     __get_user(fl->l_pid, &target_fl->l_pid);
6969     unlock_user_struct(target_fl, target_flock_addr, 0);
6970     return 0;
6971 }
6972 
6973 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6974                                                  const struct flock64 *fl)
6975 {
6976     struct target_oabi_flock64 *target_fl;
6977     short l_type;
6978 
6979     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6980         return -TARGET_EFAULT;
6981     }
6982 
6983     l_type = host_to_target_flock(fl->l_type);
6984     __put_user(l_type, &target_fl->l_type);
6985     __put_user(fl->l_whence, &target_fl->l_whence);
6986     __put_user(fl->l_start, &target_fl->l_start);
6987     __put_user(fl->l_len, &target_fl->l_len);
6988     __put_user(fl->l_pid, &target_fl->l_pid);
6989     unlock_user_struct(target_fl, target_flock_addr, 1);
6990     return 0;
6991 }
6992 #endif
6993 
6994 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6995                                               abi_ulong target_flock_addr)
6996 {
6997     struct target_flock64 *target_fl;
6998     int l_type;
6999 
7000     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7001         return -TARGET_EFAULT;
7002     }
7003 
7004     __get_user(l_type, &target_fl->l_type);
7005     l_type = target_to_host_flock(l_type);
7006     if (l_type < 0) {
7007         return l_type;
7008     }
7009     fl->l_type = l_type;
7010     __get_user(fl->l_whence, &target_fl->l_whence);
7011     __get_user(fl->l_start, &target_fl->l_start);
7012     __get_user(fl->l_len, &target_fl->l_len);
7013     __get_user(fl->l_pid, &target_fl->l_pid);
7014     unlock_user_struct(target_fl, target_flock_addr, 0);
7015     return 0;
7016 }
7017 
7018 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7019                                             const struct flock64 *fl)
7020 {
7021     struct target_flock64 *target_fl;
7022     short l_type;
7023 
7024     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7025         return -TARGET_EFAULT;
7026     }
7027 
7028     l_type = host_to_target_flock(fl->l_type);
7029     __put_user(l_type, &target_fl->l_type);
7030     __put_user(fl->l_whence, &target_fl->l_whence);
7031     __put_user(fl->l_start, &target_fl->l_start);
7032     __put_user(fl->l_len, &target_fl->l_len);
7033     __put_user(fl->l_pid, &target_fl->l_pid);
7034     unlock_user_struct(target_fl, target_flock_addr, 1);
7035     return 0;
7036 }
7037 
7038 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7039 {
7040     struct flock64 fl64;
7041 #ifdef F_GETOWN_EX
7042     struct f_owner_ex fox;
7043     struct target_f_owner_ex *target_fox;
7044 #endif
7045     abi_long ret;
7046     int host_cmd = target_to_host_fcntl_cmd(cmd);
7047 
7048     if (host_cmd == -TARGET_EINVAL)
7049 	    return host_cmd;
7050 
7051     switch(cmd) {
7052     case TARGET_F_GETLK:
7053         ret = copy_from_user_flock(&fl64, arg);
7054         if (ret) {
7055             return ret;
7056         }
7057         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7058         if (ret == 0) {
7059             ret = copy_to_user_flock(arg, &fl64);
7060         }
7061         break;
7062 
7063     case TARGET_F_SETLK:
7064     case TARGET_F_SETLKW:
7065         ret = copy_from_user_flock(&fl64, arg);
7066         if (ret) {
7067             return ret;
7068         }
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7070         break;
7071 
7072     case TARGET_F_GETLK64:
7073     case TARGET_F_OFD_GETLK:
7074         ret = copy_from_user_flock64(&fl64, arg);
7075         if (ret) {
7076             return ret;
7077         }
7078         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7079         if (ret == 0) {
7080             ret = copy_to_user_flock64(arg, &fl64);
7081         }
7082         break;
7083     case TARGET_F_SETLK64:
7084     case TARGET_F_SETLKW64:
7085     case TARGET_F_OFD_SETLK:
7086     case TARGET_F_OFD_SETLKW:
7087         ret = copy_from_user_flock64(&fl64, arg);
7088         if (ret) {
7089             return ret;
7090         }
7091         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7092         break;
7093 
7094     case TARGET_F_GETFL:
7095         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7096         if (ret >= 0) {
7097             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7098         }
7099         break;
7100 
7101     case TARGET_F_SETFL:
7102         ret = get_errno(safe_fcntl(fd, host_cmd,
7103                                    target_to_host_bitmask(arg,
7104                                                           fcntl_flags_tbl)));
7105         break;
7106 
7107 #ifdef F_GETOWN_EX
7108     case TARGET_F_GETOWN_EX:
7109         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7110         if (ret >= 0) {
7111             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7112                 return -TARGET_EFAULT;
7113             target_fox->type = tswap32(fox.type);
7114             target_fox->pid = tswap32(fox.pid);
7115             unlock_user_struct(target_fox, arg, 1);
7116         }
7117         break;
7118 #endif
7119 
7120 #ifdef F_SETOWN_EX
7121     case TARGET_F_SETOWN_EX:
7122         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7123             return -TARGET_EFAULT;
7124         fox.type = tswap32(target_fox->type);
7125         fox.pid = tswap32(target_fox->pid);
7126         unlock_user_struct(target_fox, arg, 0);
7127         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7128         break;
7129 #endif
7130 
7131     case TARGET_F_SETSIG:
7132         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7133         break;
7134 
7135     case TARGET_F_GETSIG:
7136         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7137         break;
7138 
7139     case TARGET_F_SETOWN:
7140     case TARGET_F_GETOWN:
7141     case TARGET_F_SETLEASE:
7142     case TARGET_F_GETLEASE:
7143     case TARGET_F_SETPIPE_SZ:
7144     case TARGET_F_GETPIPE_SZ:
7145     case TARGET_F_ADD_SEALS:
7146     case TARGET_F_GET_SEALS:
7147         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7148         break;
7149 
7150     default:
7151         ret = get_errno(safe_fcntl(fd, cmd, arg));
7152         break;
7153     }
7154     return ret;
7155 }
7156 
7157 #ifdef USE_UID16
7158 
7159 static inline int high2lowuid(int uid)
7160 {
7161     if (uid > 65535)
7162         return 65534;
7163     else
7164         return uid;
7165 }
7166 
7167 static inline int high2lowgid(int gid)
7168 {
7169     if (gid > 65535)
7170         return 65534;
7171     else
7172         return gid;
7173 }
7174 
7175 static inline int low2highuid(int uid)
7176 {
7177     if ((int16_t)uid == -1)
7178         return -1;
7179     else
7180         return uid;
7181 }
7182 
7183 static inline int low2highgid(int gid)
7184 {
7185     if ((int16_t)gid == -1)
7186         return -1;
7187     else
7188         return gid;
7189 }
7190 static inline int tswapid(int id)
7191 {
7192     return tswap16(id);
7193 }
7194 
7195 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7196 
7197 #else /* !USE_UID16 */
7198 static inline int high2lowuid(int uid)
7199 {
7200     return uid;
7201 }
7202 static inline int high2lowgid(int gid)
7203 {
7204     return gid;
7205 }
7206 static inline int low2highuid(int uid)
7207 {
7208     return uid;
7209 }
7210 static inline int low2highgid(int gid)
7211 {
7212     return gid;
7213 }
7214 static inline int tswapid(int id)
7215 {
7216     return tswap32(id);
7217 }
7218 
7219 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7220 
7221 #endif /* USE_UID16 */
7222 
7223 /* We must do direct syscalls for setting UID/GID, because we want to
7224  * implement the Linux system call semantics of "change only for this thread",
7225  * not the libc/POSIX semantics of "change for all threads in process".
7226  * (See http://ewontfix.com/17/ for more details.)
7227  * We use the 32-bit version of the syscalls if present; if it is not
7228  * then either the host architecture supports 32-bit UIDs natively with
7229  * the standard syscall, or the 16-bit UID is the best we can do.
7230  */
7231 #ifdef __NR_setuid32
7232 #define __NR_sys_setuid __NR_setuid32
7233 #else
7234 #define __NR_sys_setuid __NR_setuid
7235 #endif
7236 #ifdef __NR_setgid32
7237 #define __NR_sys_setgid __NR_setgid32
7238 #else
7239 #define __NR_sys_setgid __NR_setgid
7240 #endif
7241 #ifdef __NR_setresuid32
7242 #define __NR_sys_setresuid __NR_setresuid32
7243 #else
7244 #define __NR_sys_setresuid __NR_setresuid
7245 #endif
7246 #ifdef __NR_setresgid32
7247 #define __NR_sys_setresgid __NR_setresgid32
7248 #else
7249 #define __NR_sys_setresgid __NR_setresgid
7250 #endif
7251 
7252 _syscall1(int, sys_setuid, uid_t, uid)
7253 _syscall1(int, sys_setgid, gid_t, gid)
7254 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7255 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7256 
7257 void syscall_init(void)
7258 {
7259     IOCTLEntry *ie;
7260     const argtype *arg_type;
7261     int size;
7262 
7263     thunk_init(STRUCT_MAX);
7264 
7265 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7266 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7267 #include "syscall_types.h"
7268 #undef STRUCT
7269 #undef STRUCT_SPECIAL
7270 
7271     /* we patch the ioctl size if necessary. We rely on the fact that
7272        no ioctl has all the bits at '1' in the size field */
7273     ie = ioctl_entries;
7274     while (ie->target_cmd != 0) {
7275         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7276             TARGET_IOC_SIZEMASK) {
7277             arg_type = ie->arg_type;
7278             if (arg_type[0] != TYPE_PTR) {
7279                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7280                         ie->target_cmd);
7281                 exit(1);
7282             }
7283             arg_type++;
7284             size = thunk_type_size(arg_type, 0);
7285             ie->target_cmd = (ie->target_cmd &
7286                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7287                 (size << TARGET_IOC_SIZESHIFT);
7288         }
7289 
7290         /* automatic consistency check if same arch */
7291 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7292     (defined(__x86_64__) && defined(TARGET_X86_64))
7293         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7294             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7295                     ie->name, ie->target_cmd, ie->host_cmd);
7296         }
7297 #endif
7298         ie++;
7299     }
7300 }
7301 
7302 #ifdef TARGET_NR_truncate64
7303 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7304                                          abi_long arg2,
7305                                          abi_long arg3,
7306                                          abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #ifdef TARGET_NR_ftruncate64
7317 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7318                                           abi_long arg2,
7319                                           abi_long arg3,
7320                                           abi_long arg4)
7321 {
7322     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7323         arg2 = arg3;
7324         arg3 = arg4;
7325     }
7326     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7327 }
7328 #endif
7329 
7330 #if defined(TARGET_NR_timer_settime) || \
7331     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7332 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7333                                                  abi_ulong target_addr)
7334 {
7335     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7336                                 offsetof(struct target_itimerspec,
7337                                          it_interval)) ||
7338         target_to_host_timespec(&host_its->it_value, target_addr +
7339                                 offsetof(struct target_itimerspec,
7340                                          it_value))) {
7341         return -TARGET_EFAULT;
7342     }
7343 
7344     return 0;
7345 }
7346 #endif
7347 
7348 #if defined(TARGET_NR_timer_settime64) || \
7349     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7350 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7351                                                    abi_ulong target_addr)
7352 {
7353     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7354                                   offsetof(struct target__kernel_itimerspec,
7355                                            it_interval)) ||
7356         target_to_host_timespec64(&host_its->it_value, target_addr +
7357                                   offsetof(struct target__kernel_itimerspec,
7358                                            it_value))) {
7359         return -TARGET_EFAULT;
7360     }
7361 
7362     return 0;
7363 }
7364 #endif
7365 
7366 #if ((defined(TARGET_NR_timerfd_gettime) || \
7367       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7368       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7369 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7370                                                  struct itimerspec *host_its)
7371 {
7372     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7373                                                        it_interval),
7374                                 &host_its->it_interval) ||
7375         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7376                                                        it_value),
7377                                 &host_its->it_value)) {
7378         return -TARGET_EFAULT;
7379     }
7380     return 0;
7381 }
7382 #endif
7383 
7384 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7385       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7386       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7387 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7388                                                    struct itimerspec *host_its)
7389 {
7390     if (host_to_target_timespec64(target_addr +
7391                                   offsetof(struct target__kernel_itimerspec,
7392                                            it_interval),
7393                                   &host_its->it_interval) ||
7394         host_to_target_timespec64(target_addr +
7395                                   offsetof(struct target__kernel_itimerspec,
7396                                            it_value),
7397                                   &host_its->it_value)) {
7398         return -TARGET_EFAULT;
7399     }
7400     return 0;
7401 }
7402 #endif
7403 
7404 #if defined(TARGET_NR_adjtimex) || \
7405     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7406 static inline abi_long target_to_host_timex(struct timex *host_tx,
7407                                             abi_long target_addr)
7408 {
7409     struct target_timex *target_tx;
7410 
7411     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7412         return -TARGET_EFAULT;
7413     }
7414 
7415     __get_user(host_tx->modes, &target_tx->modes);
7416     __get_user(host_tx->offset, &target_tx->offset);
7417     __get_user(host_tx->freq, &target_tx->freq);
7418     __get_user(host_tx->maxerror, &target_tx->maxerror);
7419     __get_user(host_tx->esterror, &target_tx->esterror);
7420     __get_user(host_tx->status, &target_tx->status);
7421     __get_user(host_tx->constant, &target_tx->constant);
7422     __get_user(host_tx->precision, &target_tx->precision);
7423     __get_user(host_tx->tolerance, &target_tx->tolerance);
7424     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7425     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7426     __get_user(host_tx->tick, &target_tx->tick);
7427     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7428     __get_user(host_tx->jitter, &target_tx->jitter);
7429     __get_user(host_tx->shift, &target_tx->shift);
7430     __get_user(host_tx->stabil, &target_tx->stabil);
7431     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7432     __get_user(host_tx->calcnt, &target_tx->calcnt);
7433     __get_user(host_tx->errcnt, &target_tx->errcnt);
7434     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7435     __get_user(host_tx->tai, &target_tx->tai);
7436 
7437     unlock_user_struct(target_tx, target_addr, 0);
7438     return 0;
7439 }
7440 
7441 static inline abi_long host_to_target_timex(abi_long target_addr,
7442                                             struct timex *host_tx)
7443 {
7444     struct target_timex *target_tx;
7445 
7446     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     __put_user(host_tx->modes, &target_tx->modes);
7451     __put_user(host_tx->offset, &target_tx->offset);
7452     __put_user(host_tx->freq, &target_tx->freq);
7453     __put_user(host_tx->maxerror, &target_tx->maxerror);
7454     __put_user(host_tx->esterror, &target_tx->esterror);
7455     __put_user(host_tx->status, &target_tx->status);
7456     __put_user(host_tx->constant, &target_tx->constant);
7457     __put_user(host_tx->precision, &target_tx->precision);
7458     __put_user(host_tx->tolerance, &target_tx->tolerance);
7459     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7460     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7461     __put_user(host_tx->tick, &target_tx->tick);
7462     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7463     __put_user(host_tx->jitter, &target_tx->jitter);
7464     __put_user(host_tx->shift, &target_tx->shift);
7465     __put_user(host_tx->stabil, &target_tx->stabil);
7466     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7467     __put_user(host_tx->calcnt, &target_tx->calcnt);
7468     __put_user(host_tx->errcnt, &target_tx->errcnt);
7469     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7470     __put_user(host_tx->tai, &target_tx->tai);
7471 
7472     unlock_user_struct(target_tx, target_addr, 1);
7473     return 0;
7474 }
7475 #endif
7476 
7477 
7478 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7479 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7480                                               abi_long target_addr)
7481 {
7482     struct target__kernel_timex *target_tx;
7483 
7484     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7485                                  offsetof(struct target__kernel_timex,
7486                                           time))) {
7487         return -TARGET_EFAULT;
7488     }
7489 
7490     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7491         return -TARGET_EFAULT;
7492     }
7493 
7494     __get_user(host_tx->modes, &target_tx->modes);
7495     __get_user(host_tx->offset, &target_tx->offset);
7496     __get_user(host_tx->freq, &target_tx->freq);
7497     __get_user(host_tx->maxerror, &target_tx->maxerror);
7498     __get_user(host_tx->esterror, &target_tx->esterror);
7499     __get_user(host_tx->status, &target_tx->status);
7500     __get_user(host_tx->constant, &target_tx->constant);
7501     __get_user(host_tx->precision, &target_tx->precision);
7502     __get_user(host_tx->tolerance, &target_tx->tolerance);
7503     __get_user(host_tx->tick, &target_tx->tick);
7504     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7505     __get_user(host_tx->jitter, &target_tx->jitter);
7506     __get_user(host_tx->shift, &target_tx->shift);
7507     __get_user(host_tx->stabil, &target_tx->stabil);
7508     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7509     __get_user(host_tx->calcnt, &target_tx->calcnt);
7510     __get_user(host_tx->errcnt, &target_tx->errcnt);
7511     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7512     __get_user(host_tx->tai, &target_tx->tai);
7513 
7514     unlock_user_struct(target_tx, target_addr, 0);
7515     return 0;
7516 }
7517 
7518 static inline abi_long host_to_target_timex64(abi_long target_addr,
7519                                               struct timex *host_tx)
7520 {
7521     struct target__kernel_timex *target_tx;
7522 
7523    if (copy_to_user_timeval64(target_addr +
7524                               offsetof(struct target__kernel_timex, time),
7525                               &host_tx->time)) {
7526         return -TARGET_EFAULT;
7527     }
7528 
7529     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7530         return -TARGET_EFAULT;
7531     }
7532 
7533     __put_user(host_tx->modes, &target_tx->modes);
7534     __put_user(host_tx->offset, &target_tx->offset);
7535     __put_user(host_tx->freq, &target_tx->freq);
7536     __put_user(host_tx->maxerror, &target_tx->maxerror);
7537     __put_user(host_tx->esterror, &target_tx->esterror);
7538     __put_user(host_tx->status, &target_tx->status);
7539     __put_user(host_tx->constant, &target_tx->constant);
7540     __put_user(host_tx->precision, &target_tx->precision);
7541     __put_user(host_tx->tolerance, &target_tx->tolerance);
7542     __put_user(host_tx->tick, &target_tx->tick);
7543     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7544     __put_user(host_tx->jitter, &target_tx->jitter);
7545     __put_user(host_tx->shift, &target_tx->shift);
7546     __put_user(host_tx->stabil, &target_tx->stabil);
7547     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7548     __put_user(host_tx->calcnt, &target_tx->calcnt);
7549     __put_user(host_tx->errcnt, &target_tx->errcnt);
7550     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7551     __put_user(host_tx->tai, &target_tx->tai);
7552 
7553     unlock_user_struct(target_tx, target_addr, 1);
7554     return 0;
7555 }
7556 #endif
7557 
7558 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7559 #define sigev_notify_thread_id _sigev_un._tid
7560 #endif
7561 
7562 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7563                                                abi_ulong target_addr)
7564 {
7565     struct target_sigevent *target_sevp;
7566 
7567     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7568         return -TARGET_EFAULT;
7569     }
7570 
7571     /* This union is awkward on 64 bit systems because it has a 32 bit
7572      * integer and a pointer in it; we follow the conversion approach
7573      * used for handling sigval types in signal.c so the guest should get
7574      * the correct value back even if we did a 64 bit byteswap and it's
7575      * using the 32 bit integer.
7576      */
7577     host_sevp->sigev_value.sival_ptr =
7578         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7579     host_sevp->sigev_signo =
7580         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7581     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7582     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7583 
7584     unlock_user_struct(target_sevp, target_addr, 1);
7585     return 0;
7586 }
7587 
7588 #if defined(TARGET_NR_mlockall)
7589 static inline int target_to_host_mlockall_arg(int arg)
7590 {
7591     int result = 0;
7592 
7593     if (arg & TARGET_MCL_CURRENT) {
7594         result |= MCL_CURRENT;
7595     }
7596     if (arg & TARGET_MCL_FUTURE) {
7597         result |= MCL_FUTURE;
7598     }
7599 #ifdef MCL_ONFAULT
7600     if (arg & TARGET_MCL_ONFAULT) {
7601         result |= MCL_ONFAULT;
7602     }
7603 #endif
7604 
7605     return result;
7606 }
7607 #endif
7608 
7609 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7610      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7611      defined(TARGET_NR_newfstatat))
7612 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7613                                              abi_ulong target_addr,
7614                                              struct stat *host_st)
7615 {
7616 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7617     if (cpu_env->eabi) {
7618         struct target_eabi_stat64 *target_st;
7619 
7620         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7621             return -TARGET_EFAULT;
7622         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7623         __put_user(host_st->st_dev, &target_st->st_dev);
7624         __put_user(host_st->st_ino, &target_st->st_ino);
7625 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7626         __put_user(host_st->st_ino, &target_st->__st_ino);
7627 #endif
7628         __put_user(host_st->st_mode, &target_st->st_mode);
7629         __put_user(host_st->st_nlink, &target_st->st_nlink);
7630         __put_user(host_st->st_uid, &target_st->st_uid);
7631         __put_user(host_st->st_gid, &target_st->st_gid);
7632         __put_user(host_st->st_rdev, &target_st->st_rdev);
7633         __put_user(host_st->st_size, &target_st->st_size);
7634         __put_user(host_st->st_blksize, &target_st->st_blksize);
7635         __put_user(host_st->st_blocks, &target_st->st_blocks);
7636         __put_user(host_st->st_atime, &target_st->target_st_atime);
7637         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7638         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7639 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7640         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7641         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7642         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7643 #endif
7644         unlock_user_struct(target_st, target_addr, 1);
7645     } else
7646 #endif
7647     {
7648 #if defined(TARGET_HAS_STRUCT_STAT64)
7649         struct target_stat64 *target_st;
7650 #else
7651         struct target_stat *target_st;
7652 #endif
7653 
7654         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7655             return -TARGET_EFAULT;
7656         memset(target_st, 0, sizeof(*target_st));
7657         __put_user(host_st->st_dev, &target_st->st_dev);
7658         __put_user(host_st->st_ino, &target_st->st_ino);
7659 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7660         __put_user(host_st->st_ino, &target_st->__st_ino);
7661 #endif
7662         __put_user(host_st->st_mode, &target_st->st_mode);
7663         __put_user(host_st->st_nlink, &target_st->st_nlink);
7664         __put_user(host_st->st_uid, &target_st->st_uid);
7665         __put_user(host_st->st_gid, &target_st->st_gid);
7666         __put_user(host_st->st_rdev, &target_st->st_rdev);
7667         /* XXX: better use of kernel struct */
7668         __put_user(host_st->st_size, &target_st->st_size);
7669         __put_user(host_st->st_blksize, &target_st->st_blksize);
7670         __put_user(host_st->st_blocks, &target_st->st_blocks);
7671         __put_user(host_st->st_atime, &target_st->target_st_atime);
7672         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7673         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7674 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7675         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7676         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7677         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7678 #endif
7679         unlock_user_struct(target_st, target_addr, 1);
7680     }
7681 
7682     return 0;
7683 }
7684 #endif
7685 
7686 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7687 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7688                                             abi_ulong target_addr)
7689 {
7690     struct target_statx *target_stx;
7691 
7692     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7693         return -TARGET_EFAULT;
7694     }
7695     memset(target_stx, 0, sizeof(*target_stx));
7696 
7697     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7698     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7699     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7700     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7701     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7702     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7703     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7704     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7705     __put_user(host_stx->stx_size, &target_stx->stx_size);
7706     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7707     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7708     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7709     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7710     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7711     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7712     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7713     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7714     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7715     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7716     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7717     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7718     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7719     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7720 
7721     unlock_user_struct(target_stx, target_addr, 1);
7722 
7723     return 0;
7724 }
7725 #endif
7726 
7727 static int do_sys_futex(int *uaddr, int op, int val,
7728                          const struct timespec *timeout, int *uaddr2,
7729                          int val3)
7730 {
7731 #if HOST_LONG_BITS == 64
7732 #if defined(__NR_futex)
7733     /* always a 64-bit time_t, it doesn't define _time64 version  */
7734     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7735 
7736 #endif
7737 #else /* HOST_LONG_BITS == 64 */
7738 #if defined(__NR_futex_time64)
7739     if (sizeof(timeout->tv_sec) == 8) {
7740         /* _time64 function on 32bit arch */
7741         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7742     }
7743 #endif
7744 #if defined(__NR_futex)
7745     /* old function on 32bit arch */
7746     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7747 #endif
7748 #endif /* HOST_LONG_BITS == 64 */
7749     g_assert_not_reached();
7750 }
7751 
7752 static int do_safe_futex(int *uaddr, int op, int val,
7753                          const struct timespec *timeout, int *uaddr2,
7754                          int val3)
7755 {
7756 #if HOST_LONG_BITS == 64
7757 #if defined(__NR_futex)
7758     /* always a 64-bit time_t, it doesn't define _time64 version  */
7759     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7760 #endif
7761 #else /* HOST_LONG_BITS == 64 */
7762 #if defined(__NR_futex_time64)
7763     if (sizeof(timeout->tv_sec) == 8) {
7764         /* _time64 function on 32bit arch */
7765         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7766                                            val3));
7767     }
7768 #endif
7769 #if defined(__NR_futex)
7770     /* old function on 32bit arch */
7771     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7772 #endif
7773 #endif /* HOST_LONG_BITS == 64 */
7774     return -TARGET_ENOSYS;
7775 }
7776 
7777 /* ??? Using host futex calls even when target atomic operations
7778    are not really atomic probably breaks things.  However implementing
7779    futexes locally would make futexes shared between multiple processes
7780    tricky.  However they're probably useless because guest atomic
7781    operations won't work either.  */
7782 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7783 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7784                     int op, int val, target_ulong timeout,
7785                     target_ulong uaddr2, int val3)
7786 {
7787     struct timespec ts, *pts = NULL;
7788     void *haddr2 = NULL;
7789     int base_op;
7790 
7791     /* We assume FUTEX_* constants are the same on both host and target. */
7792 #ifdef FUTEX_CMD_MASK
7793     base_op = op & FUTEX_CMD_MASK;
7794 #else
7795     base_op = op;
7796 #endif
7797     switch (base_op) {
7798     case FUTEX_WAIT:
7799     case FUTEX_WAIT_BITSET:
7800         val = tswap32(val);
7801         break;
7802     case FUTEX_WAIT_REQUEUE_PI:
7803         val = tswap32(val);
7804         haddr2 = g2h(cpu, uaddr2);
7805         break;
7806     case FUTEX_LOCK_PI:
7807     case FUTEX_LOCK_PI2:
7808         break;
7809     case FUTEX_WAKE:
7810     case FUTEX_WAKE_BITSET:
7811     case FUTEX_TRYLOCK_PI:
7812     case FUTEX_UNLOCK_PI:
7813         timeout = 0;
7814         break;
7815     case FUTEX_FD:
7816         val = target_to_host_signal(val);
7817         timeout = 0;
7818         break;
7819     case FUTEX_CMP_REQUEUE:
7820     case FUTEX_CMP_REQUEUE_PI:
7821         val3 = tswap32(val3);
7822         /* fall through */
7823     case FUTEX_REQUEUE:
7824     case FUTEX_WAKE_OP:
7825         /*
7826          * For these, the 4th argument is not TIMEOUT, but VAL2.
7827          * But the prototype of do_safe_futex takes a pointer, so
7828          * insert casts to satisfy the compiler.  We do not need
7829          * to tswap VAL2 since it's not compared to guest memory.
7830           */
7831         pts = (struct timespec *)(uintptr_t)timeout;
7832         timeout = 0;
7833         haddr2 = g2h(cpu, uaddr2);
7834         break;
7835     default:
7836         return -TARGET_ENOSYS;
7837     }
7838     if (timeout) {
7839         pts = &ts;
7840         if (time64
7841             ? target_to_host_timespec64(pts, timeout)
7842             : target_to_host_timespec(pts, timeout)) {
7843             return -TARGET_EFAULT;
7844         }
7845     }
7846     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7847 }
7848 #endif
7849 
7850 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7851 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7852                                      abi_long handle, abi_long mount_id,
7853                                      abi_long flags)
7854 {
7855     struct file_handle *target_fh;
7856     struct file_handle *fh;
7857     int mid = 0;
7858     abi_long ret;
7859     char *name;
7860     unsigned int size, total_size;
7861 
7862     if (get_user_s32(size, handle)) {
7863         return -TARGET_EFAULT;
7864     }
7865 
7866     name = lock_user_string(pathname);
7867     if (!name) {
7868         return -TARGET_EFAULT;
7869     }
7870 
7871     total_size = sizeof(struct file_handle) + size;
7872     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7873     if (!target_fh) {
7874         unlock_user(name, pathname, 0);
7875         return -TARGET_EFAULT;
7876     }
7877 
7878     fh = g_malloc0(total_size);
7879     fh->handle_bytes = size;
7880 
7881     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7882     unlock_user(name, pathname, 0);
7883 
7884     /* man name_to_handle_at(2):
7885      * Other than the use of the handle_bytes field, the caller should treat
7886      * the file_handle structure as an opaque data type
7887      */
7888 
7889     memcpy(target_fh, fh, total_size);
7890     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7891     target_fh->handle_type = tswap32(fh->handle_type);
7892     g_free(fh);
7893     unlock_user(target_fh, handle, total_size);
7894 
7895     if (put_user_s32(mid, mount_id)) {
7896         return -TARGET_EFAULT;
7897     }
7898 
7899     return ret;
7900 
7901 }
7902 #endif
7903 
7904 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7905 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7906                                      abi_long flags)
7907 {
7908     struct file_handle *target_fh;
7909     struct file_handle *fh;
7910     unsigned int size, total_size;
7911     abi_long ret;
7912 
7913     if (get_user_s32(size, handle)) {
7914         return -TARGET_EFAULT;
7915     }
7916 
7917     total_size = sizeof(struct file_handle) + size;
7918     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7919     if (!target_fh) {
7920         return -TARGET_EFAULT;
7921     }
7922 
7923     fh = g_memdup(target_fh, total_size);
7924     fh->handle_bytes = size;
7925     fh->handle_type = tswap32(target_fh->handle_type);
7926 
7927     ret = get_errno(open_by_handle_at(mount_fd, fh,
7928                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7929 
7930     g_free(fh);
7931 
7932     unlock_user(target_fh, handle, total_size);
7933 
7934     return ret;
7935 }
7936 #endif
7937 
7938 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7939 
7940 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7941 {
7942     int host_flags;
7943     target_sigset_t *target_mask;
7944     sigset_t host_mask;
7945     abi_long ret;
7946 
7947     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7948         return -TARGET_EINVAL;
7949     }
7950     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7951         return -TARGET_EFAULT;
7952     }
7953 
7954     target_to_host_sigset(&host_mask, target_mask);
7955 
7956     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7957 
7958     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7959     if (ret >= 0) {
7960         fd_trans_register(ret, &target_signalfd_trans);
7961     }
7962 
7963     unlock_user_struct(target_mask, mask, 0);
7964 
7965     return ret;
7966 }
7967 #endif
7968 
7969 /* Map host to target signal numbers for the wait family of syscalls.
7970    Assume all other status bits are the same.  */
7971 int host_to_target_waitstatus(int status)
7972 {
7973     if (WIFSIGNALED(status)) {
7974         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7975     }
7976     if (WIFSTOPPED(status)) {
7977         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7978                | (status & 0xff);
7979     }
7980     return status;
7981 }
7982 
7983 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7984 {
7985     CPUState *cpu = env_cpu(cpu_env);
7986     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7987     int i;
7988 
7989     for (i = 0; i < bprm->argc; i++) {
7990         size_t len = strlen(bprm->argv[i]) + 1;
7991 
7992         if (write(fd, bprm->argv[i], len) != len) {
7993             return -1;
7994         }
7995     }
7996 
7997     return 0;
7998 }
7999 
8000 static int open_self_maps(CPUArchState *cpu_env, int fd)
8001 {
8002     CPUState *cpu = env_cpu(cpu_env);
8003     TaskState *ts = cpu->opaque;
8004     GSList *map_info = read_self_maps();
8005     GSList *s;
8006     int count;
8007 
8008     for (s = map_info; s; s = g_slist_next(s)) {
8009         MapInfo *e = (MapInfo *) s->data;
8010 
8011         if (h2g_valid(e->start)) {
8012             unsigned long min = e->start;
8013             unsigned long max = e->end;
8014             int flags = page_get_flags(h2g(min));
8015             const char *path;
8016 
8017             max = h2g_valid(max - 1) ?
8018                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8019 
8020             if (page_check_range(h2g(min), max - min, flags) == -1) {
8021                 continue;
8022             }
8023 
8024 #ifdef TARGET_HPPA
8025             if (h2g(max) == ts->info->stack_limit) {
8026 #else
8027             if (h2g(min) == ts->info->stack_limit) {
8028 #endif
8029                 path = "[stack]";
8030             } else {
8031                 path = e->path;
8032             }
8033 
8034             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8035                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8036                             h2g(min), h2g(max - 1) + 1,
8037                             (flags & PAGE_READ) ? 'r' : '-',
8038                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8039                             (flags & PAGE_EXEC) ? 'x' : '-',
8040                             e->is_priv ? 'p' : 's',
8041                             (uint64_t) e->offset, e->dev, e->inode);
8042             if (path) {
8043                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8044             } else {
8045                 dprintf(fd, "\n");
8046             }
8047         }
8048     }
8049 
8050     free_self_maps(map_info);
8051 
8052 #ifdef TARGET_VSYSCALL_PAGE
8053     /*
8054      * We only support execution from the vsyscall page.
8055      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8056      */
8057     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8058                     " --xp 00000000 00:00 0",
8059                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8060     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8061 #endif
8062 
8063     return 0;
8064 }
8065 
8066 static int open_self_stat(CPUArchState *cpu_env, int fd)
8067 {
8068     CPUState *cpu = env_cpu(cpu_env);
8069     TaskState *ts = cpu->opaque;
8070     g_autoptr(GString) buf = g_string_new(NULL);
8071     int i;
8072 
8073     for (i = 0; i < 44; i++) {
8074         if (i == 0) {
8075             /* pid */
8076             g_string_printf(buf, FMT_pid " ", getpid());
8077         } else if (i == 1) {
8078             /* app name */
8079             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8080             bin = bin ? bin + 1 : ts->bprm->argv[0];
8081             g_string_printf(buf, "(%.15s) ", bin);
8082         } else if (i == 3) {
8083             /* ppid */
8084             g_string_printf(buf, FMT_pid " ", getppid());
8085         } else if (i == 21) {
8086             /* starttime */
8087             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8088         } else if (i == 27) {
8089             /* stack bottom */
8090             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8091         } else {
8092             /* for the rest, there is MasterCard */
8093             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8094         }
8095 
8096         if (write(fd, buf->str, buf->len) != buf->len) {
8097             return -1;
8098         }
8099     }
8100 
8101     return 0;
8102 }
8103 
8104 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8105 {
8106     CPUState *cpu = env_cpu(cpu_env);
8107     TaskState *ts = cpu->opaque;
8108     abi_ulong auxv = ts->info->saved_auxv;
8109     abi_ulong len = ts->info->auxv_len;
8110     char *ptr;
8111 
8112     /*
8113      * Auxiliary vector is stored in target process stack.
8114      * read in whole auxv vector and copy it to file
8115      */
8116     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8117     if (ptr != NULL) {
8118         while (len > 0) {
8119             ssize_t r;
8120             r = write(fd, ptr, len);
8121             if (r <= 0) {
8122                 break;
8123             }
8124             len -= r;
8125             ptr += r;
8126         }
8127         lseek(fd, 0, SEEK_SET);
8128         unlock_user(ptr, auxv, len);
8129     }
8130 
8131     return 0;
8132 }
8133 
8134 static int is_proc_myself(const char *filename, const char *entry)
8135 {
8136     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8137         filename += strlen("/proc/");
8138         if (!strncmp(filename, "self/", strlen("self/"))) {
8139             filename += strlen("self/");
8140         } else if (*filename >= '1' && *filename <= '9') {
8141             char myself[80];
8142             snprintf(myself, sizeof(myself), "%d/", getpid());
8143             if (!strncmp(filename, myself, strlen(myself))) {
8144                 filename += strlen(myself);
8145             } else {
8146                 return 0;
8147             }
8148         } else {
8149             return 0;
8150         }
8151         if (!strcmp(filename, entry)) {
8152             return 1;
8153         }
8154     }
8155     return 0;
8156 }
8157 
8158 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8159                       const char *fmt, int code)
8160 {
8161     if (logfile) {
8162         CPUState *cs = env_cpu(env);
8163 
8164         fprintf(logfile, fmt, code);
8165         fprintf(logfile, "Failing executable: %s\n", exec_path);
8166         cpu_dump_state(cs, logfile, 0);
8167         open_self_maps(env, fileno(logfile));
8168     }
8169 }
8170 
8171 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8172 {
8173     /* dump to console */
8174     excp_dump_file(stderr, env, fmt, code);
8175 
8176     /* dump to log file */
8177     if (qemu_log_separate()) {
8178         FILE *logfile = qemu_log_trylock();
8179 
8180         excp_dump_file(logfile, env, fmt, code);
8181         qemu_log_unlock(logfile);
8182     }
8183 }
8184 
8185 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8186     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8187 static int is_proc(const char *filename, const char *entry)
8188 {
8189     return strcmp(filename, entry) == 0;
8190 }
8191 #endif
8192 
8193 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8194 static int open_net_route(CPUArchState *cpu_env, int fd)
8195 {
8196     FILE *fp;
8197     char *line = NULL;
8198     size_t len = 0;
8199     ssize_t read;
8200 
8201     fp = fopen("/proc/net/route", "r");
8202     if (fp == NULL) {
8203         return -1;
8204     }
8205 
8206     /* read header */
8207 
8208     read = getline(&line, &len, fp);
8209     dprintf(fd, "%s", line);
8210 
8211     /* read routes */
8212 
8213     while ((read = getline(&line, &len, fp)) != -1) {
8214         char iface[16];
8215         uint32_t dest, gw, mask;
8216         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8217         int fields;
8218 
8219         fields = sscanf(line,
8220                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8221                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8222                         &mask, &mtu, &window, &irtt);
8223         if (fields != 11) {
8224             continue;
8225         }
8226         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8227                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8228                 metric, tswap32(mask), mtu, window, irtt);
8229     }
8230 
8231     free(line);
8232     fclose(fp);
8233 
8234     return 0;
8235 }
8236 #endif
8237 
8238 #if defined(TARGET_SPARC)
8239 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8240 {
8241     dprintf(fd, "type\t\t: sun4u\n");
8242     return 0;
8243 }
8244 #endif
8245 
8246 #if defined(TARGET_HPPA)
8247 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8248 {
8249     int i, num_cpus;
8250 
8251     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8252     for (i = 0; i < num_cpus; i++) {
8253         dprintf(fd, "processor\t: %d\n", i);
8254         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8255         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8256         dprintf(fd, "capabilities\t: os32\n");
8257         dprintf(fd, "model\t\t: 9000/778/B160L - "
8258                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8259     }
8260     return 0;
8261 }
8262 #endif
8263 
8264 #if defined(TARGET_M68K)
8265 static int open_hardware(CPUArchState *cpu_env, int fd)
8266 {
8267     dprintf(fd, "Model:\t\tqemu-m68k\n");
8268     return 0;
8269 }
8270 #endif
8271 
8272 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8273 {
8274     struct fake_open {
8275         const char *filename;
8276         int (*fill)(CPUArchState *cpu_env, int fd);
8277         int (*cmp)(const char *s1, const char *s2);
8278     };
8279     const struct fake_open *fake_open;
8280     static const struct fake_open fakes[] = {
8281         { "maps", open_self_maps, is_proc_myself },
8282         { "stat", open_self_stat, is_proc_myself },
8283         { "auxv", open_self_auxv, is_proc_myself },
8284         { "cmdline", open_self_cmdline, is_proc_myself },
8285 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8286         { "/proc/net/route", open_net_route, is_proc },
8287 #endif
8288 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8289         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8290 #endif
8291 #if defined(TARGET_M68K)
8292         { "/proc/hardware", open_hardware, is_proc },
8293 #endif
8294         { NULL, NULL, NULL }
8295     };
8296 
8297     if (is_proc_myself(pathname, "exe")) {
8298         return safe_openat(dirfd, exec_path, flags, mode);
8299     }
8300 
8301     for (fake_open = fakes; fake_open->filename; fake_open++) {
8302         if (fake_open->cmp(pathname, fake_open->filename)) {
8303             break;
8304         }
8305     }
8306 
8307     if (fake_open->filename) {
8308         const char *tmpdir;
8309         char filename[PATH_MAX];
8310         int fd, r;
8311 
8312         fd = memfd_create("qemu-open", 0);
8313         if (fd < 0) {
8314             if (errno != ENOSYS) {
8315                 return fd;
8316             }
8317             /* create temporary file to map stat to */
8318             tmpdir = getenv("TMPDIR");
8319             if (!tmpdir)
8320                 tmpdir = "/tmp";
8321             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8322             fd = mkstemp(filename);
8323             if (fd < 0) {
8324                 return fd;
8325             }
8326             unlink(filename);
8327         }
8328 
8329         if ((r = fake_open->fill(cpu_env, fd))) {
8330             int e = errno;
8331             close(fd);
8332             errno = e;
8333             return r;
8334         }
8335         lseek(fd, 0, SEEK_SET);
8336 
8337         return fd;
8338     }
8339 
8340     return safe_openat(dirfd, path(pathname), flags, mode);
8341 }
8342 
8343 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8344                        abi_long pathname, abi_long guest_argp,
8345                        abi_long guest_envp, int flags)
8346 {
8347     int ret;
8348     char **argp, **envp;
8349     int argc, envc;
8350     abi_ulong gp;
8351     abi_ulong addr;
8352     char **q;
8353     void *p;
8354 
8355     argc = 0;
8356 
8357     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8358         if (get_user_ual(addr, gp)) {
8359             return -TARGET_EFAULT;
8360         }
8361         if (!addr) {
8362             break;
8363         }
8364         argc++;
8365     }
8366     envc = 0;
8367     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8368         if (get_user_ual(addr, gp)) {
8369             return -TARGET_EFAULT;
8370         }
8371         if (!addr) {
8372             break;
8373         }
8374         envc++;
8375     }
8376 
8377     argp = g_new0(char *, argc + 1);
8378     envp = g_new0(char *, envc + 1);
8379 
8380     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8381         if (get_user_ual(addr, gp)) {
8382             goto execve_efault;
8383         }
8384         if (!addr) {
8385             break;
8386         }
8387         *q = lock_user_string(addr);
8388         if (!*q) {
8389             goto execve_efault;
8390         }
8391     }
8392     *q = NULL;
8393 
8394     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8395         if (get_user_ual(addr, gp)) {
8396             goto execve_efault;
8397         }
8398         if (!addr) {
8399             break;
8400         }
8401         *q = lock_user_string(addr);
8402         if (!*q) {
8403             goto execve_efault;
8404         }
8405     }
8406     *q = NULL;
8407 
8408     /*
8409      * Although execve() is not an interruptible syscall it is
8410      * a special case where we must use the safe_syscall wrapper:
8411      * if we allow a signal to happen before we make the host
8412      * syscall then we will 'lose' it, because at the point of
8413      * execve the process leaves QEMU's control. So we use the
8414      * safe syscall wrapper to ensure that we either take the
8415      * signal as a guest signal, or else it does not happen
8416      * before the execve completes and makes it the other
8417      * program's problem.
8418      */
8419     p = lock_user_string(pathname);
8420     if (!p) {
8421         goto execve_efault;
8422     }
8423 
8424     if (is_proc_myself(p, "exe")) {
8425         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8426     } else {
8427         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8428     }
8429 
8430     unlock_user(p, pathname, 0);
8431 
8432     goto execve_end;
8433 
8434 execve_efault:
8435     ret = -TARGET_EFAULT;
8436 
8437 execve_end:
8438     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8439         if (get_user_ual(addr, gp) || !addr) {
8440             break;
8441         }
8442         unlock_user(*q, addr, 0);
8443     }
8444     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8445         if (get_user_ual(addr, gp) || !addr) {
8446             break;
8447         }
8448         unlock_user(*q, addr, 0);
8449     }
8450 
8451     g_free(argp);
8452     g_free(envp);
8453     return ret;
8454 }
8455 
8456 #define TIMER_MAGIC 0x0caf0000
8457 #define TIMER_MAGIC_MASK 0xffff0000
8458 
8459 /* Convert QEMU provided timer ID back to internal 16bit index format */
8460 static target_timer_t get_timer_id(abi_long arg)
8461 {
8462     target_timer_t timerid = arg;
8463 
8464     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8465         return -TARGET_EINVAL;
8466     }
8467 
8468     timerid &= 0xffff;
8469 
8470     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8471         return -TARGET_EINVAL;
8472     }
8473 
8474     return timerid;
8475 }
8476 
8477 static int target_to_host_cpu_mask(unsigned long *host_mask,
8478                                    size_t host_size,
8479                                    abi_ulong target_addr,
8480                                    size_t target_size)
8481 {
8482     unsigned target_bits = sizeof(abi_ulong) * 8;
8483     unsigned host_bits = sizeof(*host_mask) * 8;
8484     abi_ulong *target_mask;
8485     unsigned i, j;
8486 
8487     assert(host_size >= target_size);
8488 
8489     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8490     if (!target_mask) {
8491         return -TARGET_EFAULT;
8492     }
8493     memset(host_mask, 0, host_size);
8494 
8495     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8496         unsigned bit = i * target_bits;
8497         abi_ulong val;
8498 
8499         __get_user(val, &target_mask[i]);
8500         for (j = 0; j < target_bits; j++, bit++) {
8501             if (val & (1UL << j)) {
8502                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8503             }
8504         }
8505     }
8506 
8507     unlock_user(target_mask, target_addr, 0);
8508     return 0;
8509 }
8510 
8511 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8512                                    size_t host_size,
8513                                    abi_ulong target_addr,
8514                                    size_t target_size)
8515 {
8516     unsigned target_bits = sizeof(abi_ulong) * 8;
8517     unsigned host_bits = sizeof(*host_mask) * 8;
8518     abi_ulong *target_mask;
8519     unsigned i, j;
8520 
8521     assert(host_size >= target_size);
8522 
8523     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8524     if (!target_mask) {
8525         return -TARGET_EFAULT;
8526     }
8527 
8528     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8529         unsigned bit = i * target_bits;
8530         abi_ulong val = 0;
8531 
8532         for (j = 0; j < target_bits; j++, bit++) {
8533             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8534                 val |= 1UL << j;
8535             }
8536         }
8537         __put_user(val, &target_mask[i]);
8538     }
8539 
8540     unlock_user(target_mask, target_addr, target_size);
8541     return 0;
8542 }
8543 
8544 #ifdef TARGET_NR_getdents
8545 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8546 {
8547     g_autofree void *hdirp = NULL;
8548     void *tdirp;
8549     int hlen, hoff, toff;
8550     int hreclen, treclen;
8551     off64_t prev_diroff = 0;
8552 
8553     hdirp = g_try_malloc(count);
8554     if (!hdirp) {
8555         return -TARGET_ENOMEM;
8556     }
8557 
8558 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8559     hlen = sys_getdents(dirfd, hdirp, count);
8560 #else
8561     hlen = sys_getdents64(dirfd, hdirp, count);
8562 #endif
8563 
8564     hlen = get_errno(hlen);
8565     if (is_error(hlen)) {
8566         return hlen;
8567     }
8568 
8569     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8570     if (!tdirp) {
8571         return -TARGET_EFAULT;
8572     }
8573 
8574     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8575 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8576         struct linux_dirent *hde = hdirp + hoff;
8577 #else
8578         struct linux_dirent64 *hde = hdirp + hoff;
8579 #endif
8580         struct target_dirent *tde = tdirp + toff;
8581         int namelen;
8582         uint8_t type;
8583 
8584         namelen = strlen(hde->d_name);
8585         hreclen = hde->d_reclen;
8586         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8587         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8588 
8589         if (toff + treclen > count) {
8590             /*
8591              * If the host struct is smaller than the target struct, or
8592              * requires less alignment and thus packs into less space,
8593              * then the host can return more entries than we can pass
8594              * on to the guest.
8595              */
8596             if (toff == 0) {
8597                 toff = -TARGET_EINVAL; /* result buffer is too small */
8598                 break;
8599             }
8600             /*
8601              * Return what we have, resetting the file pointer to the
8602              * location of the first record not returned.
8603              */
8604             lseek64(dirfd, prev_diroff, SEEK_SET);
8605             break;
8606         }
8607 
8608         prev_diroff = hde->d_off;
8609         tde->d_ino = tswapal(hde->d_ino);
8610         tde->d_off = tswapal(hde->d_off);
8611         tde->d_reclen = tswap16(treclen);
8612         memcpy(tde->d_name, hde->d_name, namelen + 1);
8613 
8614         /*
8615          * The getdents type is in what was formerly a padding byte at the
8616          * end of the structure.
8617          */
8618 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8619         type = *((uint8_t *)hde + hreclen - 1);
8620 #else
8621         type = hde->d_type;
8622 #endif
8623         *((uint8_t *)tde + treclen - 1) = type;
8624     }
8625 
8626     unlock_user(tdirp, arg2, toff);
8627     return toff;
8628 }
8629 #endif /* TARGET_NR_getdents */
8630 
8631 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8632 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8633 {
8634     g_autofree void *hdirp = NULL;
8635     void *tdirp;
8636     int hlen, hoff, toff;
8637     int hreclen, treclen;
8638     off64_t prev_diroff = 0;
8639 
8640     hdirp = g_try_malloc(count);
8641     if (!hdirp) {
8642         return -TARGET_ENOMEM;
8643     }
8644 
8645     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8646     if (is_error(hlen)) {
8647         return hlen;
8648     }
8649 
8650     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8651     if (!tdirp) {
8652         return -TARGET_EFAULT;
8653     }
8654 
8655     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8656         struct linux_dirent64 *hde = hdirp + hoff;
8657         struct target_dirent64 *tde = tdirp + toff;
8658         int namelen;
8659 
8660         namelen = strlen(hde->d_name) + 1;
8661         hreclen = hde->d_reclen;
8662         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8663         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8664 
8665         if (toff + treclen > count) {
8666             /*
8667              * If the host struct is smaller than the target struct, or
8668              * requires less alignment and thus packs into less space,
8669              * then the host can return more entries than we can pass
8670              * on to the guest.
8671              */
8672             if (toff == 0) {
8673                 toff = -TARGET_EINVAL; /* result buffer is too small */
8674                 break;
8675             }
8676             /*
8677              * Return what we have, resetting the file pointer to the
8678              * location of the first record not returned.
8679              */
8680             lseek64(dirfd, prev_diroff, SEEK_SET);
8681             break;
8682         }
8683 
8684         prev_diroff = hde->d_off;
8685         tde->d_ino = tswap64(hde->d_ino);
8686         tde->d_off = tswap64(hde->d_off);
8687         tde->d_reclen = tswap16(treclen);
8688         tde->d_type = hde->d_type;
8689         memcpy(tde->d_name, hde->d_name, namelen);
8690     }
8691 
8692     unlock_user(tdirp, arg2, toff);
8693     return toff;
8694 }
8695 #endif /* TARGET_NR_getdents64 */
8696 
8697 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8698 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8699 #endif
8700 
8701 /* This is an internal helper for do_syscall so that it is easier
8702  * to have a single return point, so that actions, such as logging
8703  * of syscall results, can be performed.
8704  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8705  */
8706 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8707                             abi_long arg2, abi_long arg3, abi_long arg4,
8708                             abi_long arg5, abi_long arg6, abi_long arg7,
8709                             abi_long arg8)
8710 {
8711     CPUState *cpu = env_cpu(cpu_env);
8712     abi_long ret;
8713 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8714     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8715     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8716     || defined(TARGET_NR_statx)
8717     struct stat st;
8718 #endif
8719 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8720     || defined(TARGET_NR_fstatfs)
8721     struct statfs stfs;
8722 #endif
8723     void *p;
8724 
8725     switch(num) {
8726     case TARGET_NR_exit:
8727         /* In old applications this may be used to implement _exit(2).
8728            However in threaded applications it is used for thread termination,
8729            and _exit_group is used for application termination.
8730            Do thread termination if we have more then one thread.  */
8731 
8732         if (block_signals()) {
8733             return -QEMU_ERESTARTSYS;
8734         }
8735 
8736         pthread_mutex_lock(&clone_lock);
8737 
8738         if (CPU_NEXT(first_cpu)) {
8739             TaskState *ts = cpu->opaque;
8740 
8741             if (ts->child_tidptr) {
8742                 put_user_u32(0, ts->child_tidptr);
8743                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8744                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8745             }
8746 
8747             object_unparent(OBJECT(cpu));
8748             object_unref(OBJECT(cpu));
8749             /*
8750              * At this point the CPU should be unrealized and removed
8751              * from cpu lists. We can clean-up the rest of the thread
8752              * data without the lock held.
8753              */
8754 
8755             pthread_mutex_unlock(&clone_lock);
8756 
8757             thread_cpu = NULL;
8758             g_free(ts);
8759             rcu_unregister_thread();
8760             pthread_exit(NULL);
8761         }
8762 
8763         pthread_mutex_unlock(&clone_lock);
8764         preexit_cleanup(cpu_env, arg1);
8765         _exit(arg1);
8766         return 0; /* avoid warning */
8767     case TARGET_NR_read:
8768         if (arg2 == 0 && arg3 == 0) {
8769             return get_errno(safe_read(arg1, 0, 0));
8770         } else {
8771             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8772                 return -TARGET_EFAULT;
8773             ret = get_errno(safe_read(arg1, p, arg3));
8774             if (ret >= 0 &&
8775                 fd_trans_host_to_target_data(arg1)) {
8776                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8777             }
8778             unlock_user(p, arg2, ret);
8779         }
8780         return ret;
8781     case TARGET_NR_write:
8782         if (arg2 == 0 && arg3 == 0) {
8783             return get_errno(safe_write(arg1, 0, 0));
8784         }
8785         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8786             return -TARGET_EFAULT;
8787         if (fd_trans_target_to_host_data(arg1)) {
8788             void *copy = g_malloc(arg3);
8789             memcpy(copy, p, arg3);
8790             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8791             if (ret >= 0) {
8792                 ret = get_errno(safe_write(arg1, copy, ret));
8793             }
8794             g_free(copy);
8795         } else {
8796             ret = get_errno(safe_write(arg1, p, arg3));
8797         }
8798         unlock_user(p, arg2, 0);
8799         return ret;
8800 
8801 #ifdef TARGET_NR_open
8802     case TARGET_NR_open:
8803         if (!(p = lock_user_string(arg1)))
8804             return -TARGET_EFAULT;
8805         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8806                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8807                                   arg3));
8808         fd_trans_unregister(ret);
8809         unlock_user(p, arg1, 0);
8810         return ret;
8811 #endif
8812     case TARGET_NR_openat:
8813         if (!(p = lock_user_string(arg2)))
8814             return -TARGET_EFAULT;
8815         ret = get_errno(do_openat(cpu_env, arg1, p,
8816                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8817                                   arg4));
8818         fd_trans_unregister(ret);
8819         unlock_user(p, arg2, 0);
8820         return ret;
8821 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8822     case TARGET_NR_name_to_handle_at:
8823         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8824         return ret;
8825 #endif
8826 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8827     case TARGET_NR_open_by_handle_at:
8828         ret = do_open_by_handle_at(arg1, arg2, arg3);
8829         fd_trans_unregister(ret);
8830         return ret;
8831 #endif
8832 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8833     case TARGET_NR_pidfd_open:
8834         return get_errno(pidfd_open(arg1, arg2));
8835 #endif
8836 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8837     case TARGET_NR_pidfd_send_signal:
8838         {
8839             siginfo_t uinfo, *puinfo;
8840 
8841             if (arg3) {
8842                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8843                 if (!p) {
8844                     return -TARGET_EFAULT;
8845                  }
8846                  target_to_host_siginfo(&uinfo, p);
8847                  unlock_user(p, arg3, 0);
8848                  puinfo = &uinfo;
8849             } else {
8850                  puinfo = NULL;
8851             }
8852             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8853                                               puinfo, arg4));
8854         }
8855         return ret;
8856 #endif
8857 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8858     case TARGET_NR_pidfd_getfd:
8859         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8860 #endif
8861     case TARGET_NR_close:
8862         fd_trans_unregister(arg1);
8863         return get_errno(close(arg1));
8864 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8865     case TARGET_NR_close_range:
8866         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8867         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8868             abi_long fd, maxfd;
8869             maxfd = MIN(arg2, target_fd_max);
8870             for (fd = arg1; fd < maxfd; fd++) {
8871                 fd_trans_unregister(fd);
8872             }
8873         }
8874         return ret;
8875 #endif
8876 
8877     case TARGET_NR_brk:
8878         return do_brk(arg1);
8879 #ifdef TARGET_NR_fork
8880     case TARGET_NR_fork:
8881         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8882 #endif
8883 #ifdef TARGET_NR_waitpid
8884     case TARGET_NR_waitpid:
8885         {
8886             int status;
8887             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8888             if (!is_error(ret) && arg2 && ret
8889                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8890                 return -TARGET_EFAULT;
8891         }
8892         return ret;
8893 #endif
8894 #ifdef TARGET_NR_waitid
8895     case TARGET_NR_waitid:
8896         {
8897             siginfo_t info;
8898             info.si_pid = 0;
8899             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8900             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8901                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8902                     return -TARGET_EFAULT;
8903                 host_to_target_siginfo(p, &info);
8904                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8905             }
8906         }
8907         return ret;
8908 #endif
8909 #ifdef TARGET_NR_creat /* not on alpha */
8910     case TARGET_NR_creat:
8911         if (!(p = lock_user_string(arg1)))
8912             return -TARGET_EFAULT;
8913         ret = get_errno(creat(p, arg2));
8914         fd_trans_unregister(ret);
8915         unlock_user(p, arg1, 0);
8916         return ret;
8917 #endif
8918 #ifdef TARGET_NR_link
8919     case TARGET_NR_link:
8920         {
8921             void * p2;
8922             p = lock_user_string(arg1);
8923             p2 = lock_user_string(arg2);
8924             if (!p || !p2)
8925                 ret = -TARGET_EFAULT;
8926             else
8927                 ret = get_errno(link(p, p2));
8928             unlock_user(p2, arg2, 0);
8929             unlock_user(p, arg1, 0);
8930         }
8931         return ret;
8932 #endif
8933 #if defined(TARGET_NR_linkat)
8934     case TARGET_NR_linkat:
8935         {
8936             void * p2 = NULL;
8937             if (!arg2 || !arg4)
8938                 return -TARGET_EFAULT;
8939             p  = lock_user_string(arg2);
8940             p2 = lock_user_string(arg4);
8941             if (!p || !p2)
8942                 ret = -TARGET_EFAULT;
8943             else
8944                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8945             unlock_user(p, arg2, 0);
8946             unlock_user(p2, arg4, 0);
8947         }
8948         return ret;
8949 #endif
8950 #ifdef TARGET_NR_unlink
8951     case TARGET_NR_unlink:
8952         if (!(p = lock_user_string(arg1)))
8953             return -TARGET_EFAULT;
8954         ret = get_errno(unlink(p));
8955         unlock_user(p, arg1, 0);
8956         return ret;
8957 #endif
8958 #if defined(TARGET_NR_unlinkat)
8959     case TARGET_NR_unlinkat:
8960         if (!(p = lock_user_string(arg2)))
8961             return -TARGET_EFAULT;
8962         ret = get_errno(unlinkat(arg1, p, arg3));
8963         unlock_user(p, arg2, 0);
8964         return ret;
8965 #endif
8966     case TARGET_NR_execveat:
8967         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8968     case TARGET_NR_execve:
8969         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8970     case TARGET_NR_chdir:
8971         if (!(p = lock_user_string(arg1)))
8972             return -TARGET_EFAULT;
8973         ret = get_errno(chdir(p));
8974         unlock_user(p, arg1, 0);
8975         return ret;
8976 #ifdef TARGET_NR_time
8977     case TARGET_NR_time:
8978         {
8979             time_t host_time;
8980             ret = get_errno(time(&host_time));
8981             if (!is_error(ret)
8982                 && arg1
8983                 && put_user_sal(host_time, arg1))
8984                 return -TARGET_EFAULT;
8985         }
8986         return ret;
8987 #endif
8988 #ifdef TARGET_NR_mknod
8989     case TARGET_NR_mknod:
8990         if (!(p = lock_user_string(arg1)))
8991             return -TARGET_EFAULT;
8992         ret = get_errno(mknod(p, arg2, arg3));
8993         unlock_user(p, arg1, 0);
8994         return ret;
8995 #endif
8996 #if defined(TARGET_NR_mknodat)
8997     case TARGET_NR_mknodat:
8998         if (!(p = lock_user_string(arg2)))
8999             return -TARGET_EFAULT;
9000         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9001         unlock_user(p, arg2, 0);
9002         return ret;
9003 #endif
9004 #ifdef TARGET_NR_chmod
9005     case TARGET_NR_chmod:
9006         if (!(p = lock_user_string(arg1)))
9007             return -TARGET_EFAULT;
9008         ret = get_errno(chmod(p, arg2));
9009         unlock_user(p, arg1, 0);
9010         return ret;
9011 #endif
9012 #ifdef TARGET_NR_lseek
9013     case TARGET_NR_lseek:
9014         return get_errno(lseek(arg1, arg2, arg3));
9015 #endif
9016 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9017     /* Alpha specific */
9018     case TARGET_NR_getxpid:
9019         cpu_env->ir[IR_A4] = getppid();
9020         return get_errno(getpid());
9021 #endif
9022 #ifdef TARGET_NR_getpid
9023     case TARGET_NR_getpid:
9024         return get_errno(getpid());
9025 #endif
9026     case TARGET_NR_mount:
9027         {
9028             /* need to look at the data field */
9029             void *p2, *p3;
9030 
9031             if (arg1) {
9032                 p = lock_user_string(arg1);
9033                 if (!p) {
9034                     return -TARGET_EFAULT;
9035                 }
9036             } else {
9037                 p = NULL;
9038             }
9039 
9040             p2 = lock_user_string(arg2);
9041             if (!p2) {
9042                 if (arg1) {
9043                     unlock_user(p, arg1, 0);
9044                 }
9045                 return -TARGET_EFAULT;
9046             }
9047 
9048             if (arg3) {
9049                 p3 = lock_user_string(arg3);
9050                 if (!p3) {
9051                     if (arg1) {
9052                         unlock_user(p, arg1, 0);
9053                     }
9054                     unlock_user(p2, arg2, 0);
9055                     return -TARGET_EFAULT;
9056                 }
9057             } else {
9058                 p3 = NULL;
9059             }
9060 
9061             /* FIXME - arg5 should be locked, but it isn't clear how to
9062              * do that since it's not guaranteed to be a NULL-terminated
9063              * string.
9064              */
9065             if (!arg5) {
9066                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9067             } else {
9068                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9069             }
9070             ret = get_errno(ret);
9071 
9072             if (arg1) {
9073                 unlock_user(p, arg1, 0);
9074             }
9075             unlock_user(p2, arg2, 0);
9076             if (arg3) {
9077                 unlock_user(p3, arg3, 0);
9078             }
9079         }
9080         return ret;
9081 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9082 #if defined(TARGET_NR_umount)
9083     case TARGET_NR_umount:
9084 #endif
9085 #if defined(TARGET_NR_oldumount)
9086     case TARGET_NR_oldumount:
9087 #endif
9088         if (!(p = lock_user_string(arg1)))
9089             return -TARGET_EFAULT;
9090         ret = get_errno(umount(p));
9091         unlock_user(p, arg1, 0);
9092         return ret;
9093 #endif
9094 #ifdef TARGET_NR_stime /* not on alpha */
9095     case TARGET_NR_stime:
9096         {
9097             struct timespec ts;
9098             ts.tv_nsec = 0;
9099             if (get_user_sal(ts.tv_sec, arg1)) {
9100                 return -TARGET_EFAULT;
9101             }
9102             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9103         }
9104 #endif
9105 #ifdef TARGET_NR_alarm /* not on alpha */
9106     case TARGET_NR_alarm:
9107         return alarm(arg1);
9108 #endif
9109 #ifdef TARGET_NR_pause /* not on alpha */
9110     case TARGET_NR_pause:
9111         if (!block_signals()) {
9112             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9113         }
9114         return -TARGET_EINTR;
9115 #endif
9116 #ifdef TARGET_NR_utime
9117     case TARGET_NR_utime:
9118         {
9119             struct utimbuf tbuf, *host_tbuf;
9120             struct target_utimbuf *target_tbuf;
9121             if (arg2) {
9122                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9123                     return -TARGET_EFAULT;
9124                 tbuf.actime = tswapal(target_tbuf->actime);
9125                 tbuf.modtime = tswapal(target_tbuf->modtime);
9126                 unlock_user_struct(target_tbuf, arg2, 0);
9127                 host_tbuf = &tbuf;
9128             } else {
9129                 host_tbuf = NULL;
9130             }
9131             if (!(p = lock_user_string(arg1)))
9132                 return -TARGET_EFAULT;
9133             ret = get_errno(utime(p, host_tbuf));
9134             unlock_user(p, arg1, 0);
9135         }
9136         return ret;
9137 #endif
9138 #ifdef TARGET_NR_utimes
9139     case TARGET_NR_utimes:
9140         {
9141             struct timeval *tvp, tv[2];
9142             if (arg2) {
9143                 if (copy_from_user_timeval(&tv[0], arg2)
9144                     || copy_from_user_timeval(&tv[1],
9145                                               arg2 + sizeof(struct target_timeval)))
9146                     return -TARGET_EFAULT;
9147                 tvp = tv;
9148             } else {
9149                 tvp = NULL;
9150             }
9151             if (!(p = lock_user_string(arg1)))
9152                 return -TARGET_EFAULT;
9153             ret = get_errno(utimes(p, tvp));
9154             unlock_user(p, arg1, 0);
9155         }
9156         return ret;
9157 #endif
9158 #if defined(TARGET_NR_futimesat)
9159     case TARGET_NR_futimesat:
9160         {
9161             struct timeval *tvp, tv[2];
9162             if (arg3) {
9163                 if (copy_from_user_timeval(&tv[0], arg3)
9164                     || copy_from_user_timeval(&tv[1],
9165                                               arg3 + sizeof(struct target_timeval)))
9166                     return -TARGET_EFAULT;
9167                 tvp = tv;
9168             } else {
9169                 tvp = NULL;
9170             }
9171             if (!(p = lock_user_string(arg2))) {
9172                 return -TARGET_EFAULT;
9173             }
9174             ret = get_errno(futimesat(arg1, path(p), tvp));
9175             unlock_user(p, arg2, 0);
9176         }
9177         return ret;
9178 #endif
9179 #ifdef TARGET_NR_access
9180     case TARGET_NR_access:
9181         if (!(p = lock_user_string(arg1))) {
9182             return -TARGET_EFAULT;
9183         }
9184         ret = get_errno(access(path(p), arg2));
9185         unlock_user(p, arg1, 0);
9186         return ret;
9187 #endif
9188 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9189     case TARGET_NR_faccessat:
9190         if (!(p = lock_user_string(arg2))) {
9191             return -TARGET_EFAULT;
9192         }
9193         ret = get_errno(faccessat(arg1, p, arg3, 0));
9194         unlock_user(p, arg2, 0);
9195         return ret;
9196 #endif
9197 #if defined(TARGET_NR_faccessat2)
9198     case TARGET_NR_faccessat2:
9199         if (!(p = lock_user_string(arg2))) {
9200             return -TARGET_EFAULT;
9201         }
9202         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9203         unlock_user(p, arg2, 0);
9204         return ret;
9205 #endif
9206 #ifdef TARGET_NR_nice /* not on alpha */
9207     case TARGET_NR_nice:
9208         return get_errno(nice(arg1));
9209 #endif
9210     case TARGET_NR_sync:
9211         sync();
9212         return 0;
9213 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9214     case TARGET_NR_syncfs:
9215         return get_errno(syncfs(arg1));
9216 #endif
9217     case TARGET_NR_kill:
9218         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9219 #ifdef TARGET_NR_rename
9220     case TARGET_NR_rename:
9221         {
9222             void *p2;
9223             p = lock_user_string(arg1);
9224             p2 = lock_user_string(arg2);
9225             if (!p || !p2)
9226                 ret = -TARGET_EFAULT;
9227             else
9228                 ret = get_errno(rename(p, p2));
9229             unlock_user(p2, arg2, 0);
9230             unlock_user(p, arg1, 0);
9231         }
9232         return ret;
9233 #endif
9234 #if defined(TARGET_NR_renameat)
9235     case TARGET_NR_renameat:
9236         {
9237             void *p2;
9238             p  = lock_user_string(arg2);
9239             p2 = lock_user_string(arg4);
9240             if (!p || !p2)
9241                 ret = -TARGET_EFAULT;
9242             else
9243                 ret = get_errno(renameat(arg1, p, arg3, p2));
9244             unlock_user(p2, arg4, 0);
9245             unlock_user(p, arg2, 0);
9246         }
9247         return ret;
9248 #endif
9249 #if defined(TARGET_NR_renameat2)
9250     case TARGET_NR_renameat2:
9251         {
9252             void *p2;
9253             p  = lock_user_string(arg2);
9254             p2 = lock_user_string(arg4);
9255             if (!p || !p2) {
9256                 ret = -TARGET_EFAULT;
9257             } else {
9258                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9259             }
9260             unlock_user(p2, arg4, 0);
9261             unlock_user(p, arg2, 0);
9262         }
9263         return ret;
9264 #endif
9265 #ifdef TARGET_NR_mkdir
9266     case TARGET_NR_mkdir:
9267         if (!(p = lock_user_string(arg1)))
9268             return -TARGET_EFAULT;
9269         ret = get_errno(mkdir(p, arg2));
9270         unlock_user(p, arg1, 0);
9271         return ret;
9272 #endif
9273 #if defined(TARGET_NR_mkdirat)
9274     case TARGET_NR_mkdirat:
9275         if (!(p = lock_user_string(arg2)))
9276             return -TARGET_EFAULT;
9277         ret = get_errno(mkdirat(arg1, p, arg3));
9278         unlock_user(p, arg2, 0);
9279         return ret;
9280 #endif
9281 #ifdef TARGET_NR_rmdir
9282     case TARGET_NR_rmdir:
9283         if (!(p = lock_user_string(arg1)))
9284             return -TARGET_EFAULT;
9285         ret = get_errno(rmdir(p));
9286         unlock_user(p, arg1, 0);
9287         return ret;
9288 #endif
9289     case TARGET_NR_dup:
9290         ret = get_errno(dup(arg1));
9291         if (ret >= 0) {
9292             fd_trans_dup(arg1, ret);
9293         }
9294         return ret;
9295 #ifdef TARGET_NR_pipe
9296     case TARGET_NR_pipe:
9297         return do_pipe(cpu_env, arg1, 0, 0);
9298 #endif
9299 #ifdef TARGET_NR_pipe2
9300     case TARGET_NR_pipe2:
9301         return do_pipe(cpu_env, arg1,
9302                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9303 #endif
9304     case TARGET_NR_times:
9305         {
9306             struct target_tms *tmsp;
9307             struct tms tms;
9308             ret = get_errno(times(&tms));
9309             if (arg1) {
9310                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9311                 if (!tmsp)
9312                     return -TARGET_EFAULT;
9313                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9314                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9315                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9316                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9317             }
9318             if (!is_error(ret))
9319                 ret = host_to_target_clock_t(ret);
9320         }
9321         return ret;
9322     case TARGET_NR_acct:
9323         if (arg1 == 0) {
9324             ret = get_errno(acct(NULL));
9325         } else {
9326             if (!(p = lock_user_string(arg1))) {
9327                 return -TARGET_EFAULT;
9328             }
9329             ret = get_errno(acct(path(p)));
9330             unlock_user(p, arg1, 0);
9331         }
9332         return ret;
9333 #ifdef TARGET_NR_umount2
9334     case TARGET_NR_umount2:
9335         if (!(p = lock_user_string(arg1)))
9336             return -TARGET_EFAULT;
9337         ret = get_errno(umount2(p, arg2));
9338         unlock_user(p, arg1, 0);
9339         return ret;
9340 #endif
9341     case TARGET_NR_ioctl:
9342         return do_ioctl(arg1, arg2, arg3);
9343 #ifdef TARGET_NR_fcntl
9344     case TARGET_NR_fcntl:
9345         return do_fcntl(arg1, arg2, arg3);
9346 #endif
9347     case TARGET_NR_setpgid:
9348         return get_errno(setpgid(arg1, arg2));
9349     case TARGET_NR_umask:
9350         return get_errno(umask(arg1));
9351     case TARGET_NR_chroot:
9352         if (!(p = lock_user_string(arg1)))
9353             return -TARGET_EFAULT;
9354         ret = get_errno(chroot(p));
9355         unlock_user(p, arg1, 0);
9356         return ret;
9357 #ifdef TARGET_NR_dup2
9358     case TARGET_NR_dup2:
9359         ret = get_errno(dup2(arg1, arg2));
9360         if (ret >= 0) {
9361             fd_trans_dup(arg1, arg2);
9362         }
9363         return ret;
9364 #endif
9365 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9366     case TARGET_NR_dup3:
9367     {
9368         int host_flags;
9369 
9370         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9371             return -EINVAL;
9372         }
9373         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9374         ret = get_errno(dup3(arg1, arg2, host_flags));
9375         if (ret >= 0) {
9376             fd_trans_dup(arg1, arg2);
9377         }
9378         return ret;
9379     }
9380 #endif
9381 #ifdef TARGET_NR_getppid /* not on alpha */
9382     case TARGET_NR_getppid:
9383         return get_errno(getppid());
9384 #endif
9385 #ifdef TARGET_NR_getpgrp
9386     case TARGET_NR_getpgrp:
9387         return get_errno(getpgrp());
9388 #endif
9389     case TARGET_NR_setsid:
9390         return get_errno(setsid());
9391 #ifdef TARGET_NR_sigaction
9392     case TARGET_NR_sigaction:
9393         {
9394 #if defined(TARGET_MIPS)
9395 	    struct target_sigaction act, oact, *pact, *old_act;
9396 
9397 	    if (arg2) {
9398                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9399                     return -TARGET_EFAULT;
9400 		act._sa_handler = old_act->_sa_handler;
9401 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9402 		act.sa_flags = old_act->sa_flags;
9403 		unlock_user_struct(old_act, arg2, 0);
9404 		pact = &act;
9405 	    } else {
9406 		pact = NULL;
9407 	    }
9408 
9409         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9410 
9411 	    if (!is_error(ret) && arg3) {
9412                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9413                     return -TARGET_EFAULT;
9414 		old_act->_sa_handler = oact._sa_handler;
9415 		old_act->sa_flags = oact.sa_flags;
9416 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9417 		old_act->sa_mask.sig[1] = 0;
9418 		old_act->sa_mask.sig[2] = 0;
9419 		old_act->sa_mask.sig[3] = 0;
9420 		unlock_user_struct(old_act, arg3, 1);
9421 	    }
9422 #else
9423             struct target_old_sigaction *old_act;
9424             struct target_sigaction act, oact, *pact;
9425             if (arg2) {
9426                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9427                     return -TARGET_EFAULT;
9428                 act._sa_handler = old_act->_sa_handler;
9429                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9430                 act.sa_flags = old_act->sa_flags;
9431 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9432                 act.sa_restorer = old_act->sa_restorer;
9433 #endif
9434                 unlock_user_struct(old_act, arg2, 0);
9435                 pact = &act;
9436             } else {
9437                 pact = NULL;
9438             }
9439             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9440             if (!is_error(ret) && arg3) {
9441                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9442                     return -TARGET_EFAULT;
9443                 old_act->_sa_handler = oact._sa_handler;
9444                 old_act->sa_mask = oact.sa_mask.sig[0];
9445                 old_act->sa_flags = oact.sa_flags;
9446 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9447                 old_act->sa_restorer = oact.sa_restorer;
9448 #endif
9449                 unlock_user_struct(old_act, arg3, 1);
9450             }
9451 #endif
9452         }
9453         return ret;
9454 #endif
9455     case TARGET_NR_rt_sigaction:
9456         {
9457             /*
9458              * For Alpha and SPARC this is a 5 argument syscall, with
9459              * a 'restorer' parameter which must be copied into the
9460              * sa_restorer field of the sigaction struct.
9461              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9462              * and arg5 is the sigsetsize.
9463              */
9464 #if defined(TARGET_ALPHA)
9465             target_ulong sigsetsize = arg4;
9466             target_ulong restorer = arg5;
9467 #elif defined(TARGET_SPARC)
9468             target_ulong restorer = arg4;
9469             target_ulong sigsetsize = arg5;
9470 #else
9471             target_ulong sigsetsize = arg4;
9472             target_ulong restorer = 0;
9473 #endif
9474             struct target_sigaction *act = NULL;
9475             struct target_sigaction *oact = NULL;
9476 
9477             if (sigsetsize != sizeof(target_sigset_t)) {
9478                 return -TARGET_EINVAL;
9479             }
9480             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9481                 return -TARGET_EFAULT;
9482             }
9483             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9484                 ret = -TARGET_EFAULT;
9485             } else {
9486                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9487                 if (oact) {
9488                     unlock_user_struct(oact, arg3, 1);
9489                 }
9490             }
9491             if (act) {
9492                 unlock_user_struct(act, arg2, 0);
9493             }
9494         }
9495         return ret;
9496 #ifdef TARGET_NR_sgetmask /* not on alpha */
9497     case TARGET_NR_sgetmask:
9498         {
9499             sigset_t cur_set;
9500             abi_ulong target_set;
9501             ret = do_sigprocmask(0, NULL, &cur_set);
9502             if (!ret) {
9503                 host_to_target_old_sigset(&target_set, &cur_set);
9504                 ret = target_set;
9505             }
9506         }
9507         return ret;
9508 #endif
9509 #ifdef TARGET_NR_ssetmask /* not on alpha */
9510     case TARGET_NR_ssetmask:
9511         {
9512             sigset_t set, oset;
9513             abi_ulong target_set = arg1;
9514             target_to_host_old_sigset(&set, &target_set);
9515             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9516             if (!ret) {
9517                 host_to_target_old_sigset(&target_set, &oset);
9518                 ret = target_set;
9519             }
9520         }
9521         return ret;
9522 #endif
9523 #ifdef TARGET_NR_sigprocmask
9524     case TARGET_NR_sigprocmask:
9525         {
9526 #if defined(TARGET_ALPHA)
9527             sigset_t set, oldset;
9528             abi_ulong mask;
9529             int how;
9530 
9531             switch (arg1) {
9532             case TARGET_SIG_BLOCK:
9533                 how = SIG_BLOCK;
9534                 break;
9535             case TARGET_SIG_UNBLOCK:
9536                 how = SIG_UNBLOCK;
9537                 break;
9538             case TARGET_SIG_SETMASK:
9539                 how = SIG_SETMASK;
9540                 break;
9541             default:
9542                 return -TARGET_EINVAL;
9543             }
9544             mask = arg2;
9545             target_to_host_old_sigset(&set, &mask);
9546 
9547             ret = do_sigprocmask(how, &set, &oldset);
9548             if (!is_error(ret)) {
9549                 host_to_target_old_sigset(&mask, &oldset);
9550                 ret = mask;
9551                 cpu_env->ir[IR_V0] = 0; /* force no error */
9552             }
9553 #else
9554             sigset_t set, oldset, *set_ptr;
9555             int how;
9556 
9557             if (arg2) {
9558                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9559                 if (!p) {
9560                     return -TARGET_EFAULT;
9561                 }
9562                 target_to_host_old_sigset(&set, p);
9563                 unlock_user(p, arg2, 0);
9564                 set_ptr = &set;
9565                 switch (arg1) {
9566                 case TARGET_SIG_BLOCK:
9567                     how = SIG_BLOCK;
9568                     break;
9569                 case TARGET_SIG_UNBLOCK:
9570                     how = SIG_UNBLOCK;
9571                     break;
9572                 case TARGET_SIG_SETMASK:
9573                     how = SIG_SETMASK;
9574                     break;
9575                 default:
9576                     return -TARGET_EINVAL;
9577                 }
9578             } else {
9579                 how = 0;
9580                 set_ptr = NULL;
9581             }
9582             ret = do_sigprocmask(how, set_ptr, &oldset);
9583             if (!is_error(ret) && arg3) {
9584                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9585                     return -TARGET_EFAULT;
9586                 host_to_target_old_sigset(p, &oldset);
9587                 unlock_user(p, arg3, sizeof(target_sigset_t));
9588             }
9589 #endif
9590         }
9591         return ret;
9592 #endif
9593     case TARGET_NR_rt_sigprocmask:
9594         {
9595             int how = arg1;
9596             sigset_t set, oldset, *set_ptr;
9597 
9598             if (arg4 != sizeof(target_sigset_t)) {
9599                 return -TARGET_EINVAL;
9600             }
9601 
9602             if (arg2) {
9603                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9604                 if (!p) {
9605                     return -TARGET_EFAULT;
9606                 }
9607                 target_to_host_sigset(&set, p);
9608                 unlock_user(p, arg2, 0);
9609                 set_ptr = &set;
9610                 switch(how) {
9611                 case TARGET_SIG_BLOCK:
9612                     how = SIG_BLOCK;
9613                     break;
9614                 case TARGET_SIG_UNBLOCK:
9615                     how = SIG_UNBLOCK;
9616                     break;
9617                 case TARGET_SIG_SETMASK:
9618                     how = SIG_SETMASK;
9619                     break;
9620                 default:
9621                     return -TARGET_EINVAL;
9622                 }
9623             } else {
9624                 how = 0;
9625                 set_ptr = NULL;
9626             }
9627             ret = do_sigprocmask(how, set_ptr, &oldset);
9628             if (!is_error(ret) && arg3) {
9629                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9630                     return -TARGET_EFAULT;
9631                 host_to_target_sigset(p, &oldset);
9632                 unlock_user(p, arg3, sizeof(target_sigset_t));
9633             }
9634         }
9635         return ret;
9636 #ifdef TARGET_NR_sigpending
9637     case TARGET_NR_sigpending:
9638         {
9639             sigset_t set;
9640             ret = get_errno(sigpending(&set));
9641             if (!is_error(ret)) {
9642                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9643                     return -TARGET_EFAULT;
9644                 host_to_target_old_sigset(p, &set);
9645                 unlock_user(p, arg1, sizeof(target_sigset_t));
9646             }
9647         }
9648         return ret;
9649 #endif
9650     case TARGET_NR_rt_sigpending:
9651         {
9652             sigset_t set;
9653 
9654             /* Yes, this check is >, not != like most. We follow the kernel's
9655              * logic and it does it like this because it implements
9656              * NR_sigpending through the same code path, and in that case
9657              * the old_sigset_t is smaller in size.
9658              */
9659             if (arg2 > sizeof(target_sigset_t)) {
9660                 return -TARGET_EINVAL;
9661             }
9662 
9663             ret = get_errno(sigpending(&set));
9664             if (!is_error(ret)) {
9665                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9666                     return -TARGET_EFAULT;
9667                 host_to_target_sigset(p, &set);
9668                 unlock_user(p, arg1, sizeof(target_sigset_t));
9669             }
9670         }
9671         return ret;
9672 #ifdef TARGET_NR_sigsuspend
9673     case TARGET_NR_sigsuspend:
9674         {
9675             sigset_t *set;
9676 
9677 #if defined(TARGET_ALPHA)
9678             TaskState *ts = cpu->opaque;
9679             /* target_to_host_old_sigset will bswap back */
9680             abi_ulong mask = tswapal(arg1);
9681             set = &ts->sigsuspend_mask;
9682             target_to_host_old_sigset(set, &mask);
9683 #else
9684             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9685             if (ret != 0) {
9686                 return ret;
9687             }
9688 #endif
9689             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9690             finish_sigsuspend_mask(ret);
9691         }
9692         return ret;
9693 #endif
9694     case TARGET_NR_rt_sigsuspend:
9695         {
9696             sigset_t *set;
9697 
9698             ret = process_sigsuspend_mask(&set, arg1, arg2);
9699             if (ret != 0) {
9700                 return ret;
9701             }
9702             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9703             finish_sigsuspend_mask(ret);
9704         }
9705         return ret;
9706 #ifdef TARGET_NR_rt_sigtimedwait
9707     case TARGET_NR_rt_sigtimedwait:
9708         {
9709             sigset_t set;
9710             struct timespec uts, *puts;
9711             siginfo_t uinfo;
9712 
9713             if (arg4 != sizeof(target_sigset_t)) {
9714                 return -TARGET_EINVAL;
9715             }
9716 
9717             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9718                 return -TARGET_EFAULT;
9719             target_to_host_sigset(&set, p);
9720             unlock_user(p, arg1, 0);
9721             if (arg3) {
9722                 puts = &uts;
9723                 if (target_to_host_timespec(puts, arg3)) {
9724                     return -TARGET_EFAULT;
9725                 }
9726             } else {
9727                 puts = NULL;
9728             }
9729             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9730                                                  SIGSET_T_SIZE));
9731             if (!is_error(ret)) {
9732                 if (arg2) {
9733                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9734                                   0);
9735                     if (!p) {
9736                         return -TARGET_EFAULT;
9737                     }
9738                     host_to_target_siginfo(p, &uinfo);
9739                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9740                 }
9741                 ret = host_to_target_signal(ret);
9742             }
9743         }
9744         return ret;
9745 #endif
9746 #ifdef TARGET_NR_rt_sigtimedwait_time64
9747     case TARGET_NR_rt_sigtimedwait_time64:
9748         {
9749             sigset_t set;
9750             struct timespec uts, *puts;
9751             siginfo_t uinfo;
9752 
9753             if (arg4 != sizeof(target_sigset_t)) {
9754                 return -TARGET_EINVAL;
9755             }
9756 
9757             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9758             if (!p) {
9759                 return -TARGET_EFAULT;
9760             }
9761             target_to_host_sigset(&set, p);
9762             unlock_user(p, arg1, 0);
9763             if (arg3) {
9764                 puts = &uts;
9765                 if (target_to_host_timespec64(puts, arg3)) {
9766                     return -TARGET_EFAULT;
9767                 }
9768             } else {
9769                 puts = NULL;
9770             }
9771             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9772                                                  SIGSET_T_SIZE));
9773             if (!is_error(ret)) {
9774                 if (arg2) {
9775                     p = lock_user(VERIFY_WRITE, arg2,
9776                                   sizeof(target_siginfo_t), 0);
9777                     if (!p) {
9778                         return -TARGET_EFAULT;
9779                     }
9780                     host_to_target_siginfo(p, &uinfo);
9781                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9782                 }
9783                 ret = host_to_target_signal(ret);
9784             }
9785         }
9786         return ret;
9787 #endif
9788     case TARGET_NR_rt_sigqueueinfo:
9789         {
9790             siginfo_t uinfo;
9791 
9792             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9793             if (!p) {
9794                 return -TARGET_EFAULT;
9795             }
9796             target_to_host_siginfo(&uinfo, p);
9797             unlock_user(p, arg3, 0);
9798             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9799         }
9800         return ret;
9801     case TARGET_NR_rt_tgsigqueueinfo:
9802         {
9803             siginfo_t uinfo;
9804 
9805             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9806             if (!p) {
9807                 return -TARGET_EFAULT;
9808             }
9809             target_to_host_siginfo(&uinfo, p);
9810             unlock_user(p, arg4, 0);
9811             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9812         }
9813         return ret;
9814 #ifdef TARGET_NR_sigreturn
9815     case TARGET_NR_sigreturn:
9816         if (block_signals()) {
9817             return -QEMU_ERESTARTSYS;
9818         }
9819         return do_sigreturn(cpu_env);
9820 #endif
9821     case TARGET_NR_rt_sigreturn:
9822         if (block_signals()) {
9823             return -QEMU_ERESTARTSYS;
9824         }
9825         return do_rt_sigreturn(cpu_env);
9826     case TARGET_NR_sethostname:
9827         if (!(p = lock_user_string(arg1)))
9828             return -TARGET_EFAULT;
9829         ret = get_errno(sethostname(p, arg2));
9830         unlock_user(p, arg1, 0);
9831         return ret;
9832 #ifdef TARGET_NR_setrlimit
9833     case TARGET_NR_setrlimit:
9834         {
9835             int resource = target_to_host_resource(arg1);
9836             struct target_rlimit *target_rlim;
9837             struct rlimit rlim;
9838             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9839                 return -TARGET_EFAULT;
9840             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9841             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9842             unlock_user_struct(target_rlim, arg2, 0);
9843             /*
9844              * If we just passed through resource limit settings for memory then
9845              * they would also apply to QEMU's own allocations, and QEMU will
9846              * crash or hang or die if its allocations fail. Ideally we would
9847              * track the guest allocations in QEMU and apply the limits ourselves.
9848              * For now, just tell the guest the call succeeded but don't actually
9849              * limit anything.
9850              */
9851             if (resource != RLIMIT_AS &&
9852                 resource != RLIMIT_DATA &&
9853                 resource != RLIMIT_STACK) {
9854                 return get_errno(setrlimit(resource, &rlim));
9855             } else {
9856                 return 0;
9857             }
9858         }
9859 #endif
9860 #ifdef TARGET_NR_getrlimit
9861     case TARGET_NR_getrlimit:
9862         {
9863             int resource = target_to_host_resource(arg1);
9864             struct target_rlimit *target_rlim;
9865             struct rlimit rlim;
9866 
9867             ret = get_errno(getrlimit(resource, &rlim));
9868             if (!is_error(ret)) {
9869                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9870                     return -TARGET_EFAULT;
9871                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9872                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9873                 unlock_user_struct(target_rlim, arg2, 1);
9874             }
9875         }
9876         return ret;
9877 #endif
9878     case TARGET_NR_getrusage:
9879         {
9880             struct rusage rusage;
9881             ret = get_errno(getrusage(arg1, &rusage));
9882             if (!is_error(ret)) {
9883                 ret = host_to_target_rusage(arg2, &rusage);
9884             }
9885         }
9886         return ret;
9887 #if defined(TARGET_NR_gettimeofday)
9888     case TARGET_NR_gettimeofday:
9889         {
9890             struct timeval tv;
9891             struct timezone tz;
9892 
9893             ret = get_errno(gettimeofday(&tv, &tz));
9894             if (!is_error(ret)) {
9895                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9896                     return -TARGET_EFAULT;
9897                 }
9898                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9899                     return -TARGET_EFAULT;
9900                 }
9901             }
9902         }
9903         return ret;
9904 #endif
9905 #if defined(TARGET_NR_settimeofday)
9906     case TARGET_NR_settimeofday:
9907         {
9908             struct timeval tv, *ptv = NULL;
9909             struct timezone tz, *ptz = NULL;
9910 
9911             if (arg1) {
9912                 if (copy_from_user_timeval(&tv, arg1)) {
9913                     return -TARGET_EFAULT;
9914                 }
9915                 ptv = &tv;
9916             }
9917 
9918             if (arg2) {
9919                 if (copy_from_user_timezone(&tz, arg2)) {
9920                     return -TARGET_EFAULT;
9921                 }
9922                 ptz = &tz;
9923             }
9924 
9925             return get_errno(settimeofday(ptv, ptz));
9926         }
9927 #endif
9928 #if defined(TARGET_NR_select)
9929     case TARGET_NR_select:
9930 #if defined(TARGET_WANT_NI_OLD_SELECT)
9931         /* some architectures used to have old_select here
9932          * but now ENOSYS it.
9933          */
9934         ret = -TARGET_ENOSYS;
9935 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9936         ret = do_old_select(arg1);
9937 #else
9938         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9939 #endif
9940         return ret;
9941 #endif
9942 #ifdef TARGET_NR_pselect6
9943     case TARGET_NR_pselect6:
9944         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9945 #endif
9946 #ifdef TARGET_NR_pselect6_time64
9947     case TARGET_NR_pselect6_time64:
9948         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9949 #endif
9950 #ifdef TARGET_NR_symlink
9951     case TARGET_NR_symlink:
9952         {
9953             void *p2;
9954             p = lock_user_string(arg1);
9955             p2 = lock_user_string(arg2);
9956             if (!p || !p2)
9957                 ret = -TARGET_EFAULT;
9958             else
9959                 ret = get_errno(symlink(p, p2));
9960             unlock_user(p2, arg2, 0);
9961             unlock_user(p, arg1, 0);
9962         }
9963         return ret;
9964 #endif
9965 #if defined(TARGET_NR_symlinkat)
9966     case TARGET_NR_symlinkat:
9967         {
9968             void *p2;
9969             p  = lock_user_string(arg1);
9970             p2 = lock_user_string(arg3);
9971             if (!p || !p2)
9972                 ret = -TARGET_EFAULT;
9973             else
9974                 ret = get_errno(symlinkat(p, arg2, p2));
9975             unlock_user(p2, arg3, 0);
9976             unlock_user(p, arg1, 0);
9977         }
9978         return ret;
9979 #endif
9980 #ifdef TARGET_NR_readlink
9981     case TARGET_NR_readlink:
9982         {
9983             void *p2;
9984             p = lock_user_string(arg1);
9985             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9986             if (!p || !p2) {
9987                 ret = -TARGET_EFAULT;
9988             } else if (!arg3) {
9989                 /* Short circuit this for the magic exe check. */
9990                 ret = -TARGET_EINVAL;
9991             } else if (is_proc_myself((const char *)p, "exe")) {
9992                 char real[PATH_MAX], *temp;
9993                 temp = realpath(exec_path, real);
9994                 /* Return value is # of bytes that we wrote to the buffer. */
9995                 if (temp == NULL) {
9996                     ret = get_errno(-1);
9997                 } else {
9998                     /* Don't worry about sign mismatch as earlier mapping
9999                      * logic would have thrown a bad address error. */
10000                     ret = MIN(strlen(real), arg3);
10001                     /* We cannot NUL terminate the string. */
10002                     memcpy(p2, real, ret);
10003                 }
10004             } else {
10005                 ret = get_errno(readlink(path(p), p2, arg3));
10006             }
10007             unlock_user(p2, arg2, ret);
10008             unlock_user(p, arg1, 0);
10009         }
10010         return ret;
10011 #endif
10012 #if defined(TARGET_NR_readlinkat)
10013     case TARGET_NR_readlinkat:
10014         {
10015             void *p2;
10016             p  = lock_user_string(arg2);
10017             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10018             if (!p || !p2) {
10019                 ret = -TARGET_EFAULT;
10020             } else if (!arg4) {
10021                 /* Short circuit this for the magic exe check. */
10022                 ret = -TARGET_EINVAL;
10023             } else if (is_proc_myself((const char *)p, "exe")) {
10024                 char real[PATH_MAX], *temp;
10025                 temp = realpath(exec_path, real);
10026                 /* Return value is # of bytes that we wrote to the buffer. */
10027                 if (temp == NULL) {
10028                     ret = get_errno(-1);
10029                 } else {
10030                     /* Don't worry about sign mismatch as earlier mapping
10031                      * logic would have thrown a bad address error. */
10032                     ret = MIN(strlen(real), arg4);
10033                     /* We cannot NUL terminate the string. */
10034                     memcpy(p2, real, ret);
10035                 }
10036             } else {
10037                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10038             }
10039             unlock_user(p2, arg3, ret);
10040             unlock_user(p, arg2, 0);
10041         }
10042         return ret;
10043 #endif
10044 #ifdef TARGET_NR_swapon
10045     case TARGET_NR_swapon:
10046         if (!(p = lock_user_string(arg1)))
10047             return -TARGET_EFAULT;
10048         ret = get_errno(swapon(p, arg2));
10049         unlock_user(p, arg1, 0);
10050         return ret;
10051 #endif
10052     case TARGET_NR_reboot:
10053         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10054            /* arg4 must be ignored in all other cases */
10055            p = lock_user_string(arg4);
10056            if (!p) {
10057                return -TARGET_EFAULT;
10058            }
10059            ret = get_errno(reboot(arg1, arg2, arg3, p));
10060            unlock_user(p, arg4, 0);
10061         } else {
10062            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10063         }
10064         return ret;
10065 #ifdef TARGET_NR_mmap
10066     case TARGET_NR_mmap:
10067 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10068     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10069     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10070     || defined(TARGET_S390X)
10071         {
10072             abi_ulong *v;
10073             abi_ulong v1, v2, v3, v4, v5, v6;
10074             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10075                 return -TARGET_EFAULT;
10076             v1 = tswapal(v[0]);
10077             v2 = tswapal(v[1]);
10078             v3 = tswapal(v[2]);
10079             v4 = tswapal(v[3]);
10080             v5 = tswapal(v[4]);
10081             v6 = tswapal(v[5]);
10082             unlock_user(v, arg1, 0);
10083             ret = get_errno(target_mmap(v1, v2, v3,
10084                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10085                                         v5, v6));
10086         }
10087 #else
10088         /* mmap pointers are always untagged */
10089         ret = get_errno(target_mmap(arg1, arg2, arg3,
10090                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10091                                     arg5,
10092                                     arg6));
10093 #endif
10094         return ret;
10095 #endif
10096 #ifdef TARGET_NR_mmap2
10097     case TARGET_NR_mmap2:
10098 #ifndef MMAP_SHIFT
10099 #define MMAP_SHIFT 12
10100 #endif
10101         ret = target_mmap(arg1, arg2, arg3,
10102                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10103                           arg5, arg6 << MMAP_SHIFT);
10104         return get_errno(ret);
10105 #endif
10106     case TARGET_NR_munmap:
10107         arg1 = cpu_untagged_addr(cpu, arg1);
10108         return get_errno(target_munmap(arg1, arg2));
10109     case TARGET_NR_mprotect:
10110         arg1 = cpu_untagged_addr(cpu, arg1);
10111         {
10112             TaskState *ts = cpu->opaque;
10113             /* Special hack to detect libc making the stack executable.  */
10114             if ((arg3 & PROT_GROWSDOWN)
10115                 && arg1 >= ts->info->stack_limit
10116                 && arg1 <= ts->info->start_stack) {
10117                 arg3 &= ~PROT_GROWSDOWN;
10118                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10119                 arg1 = ts->info->stack_limit;
10120             }
10121         }
10122         return get_errno(target_mprotect(arg1, arg2, arg3));
10123 #ifdef TARGET_NR_mremap
10124     case TARGET_NR_mremap:
10125         arg1 = cpu_untagged_addr(cpu, arg1);
10126         /* mremap new_addr (arg5) is always untagged */
10127         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10128 #endif
10129         /* ??? msync/mlock/munlock are broken for softmmu.  */
10130 #ifdef TARGET_NR_msync
10131     case TARGET_NR_msync:
10132         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10133 #endif
10134 #ifdef TARGET_NR_mlock
10135     case TARGET_NR_mlock:
10136         return get_errno(mlock(g2h(cpu, arg1), arg2));
10137 #endif
10138 #ifdef TARGET_NR_munlock
10139     case TARGET_NR_munlock:
10140         return get_errno(munlock(g2h(cpu, arg1), arg2));
10141 #endif
10142 #ifdef TARGET_NR_mlockall
10143     case TARGET_NR_mlockall:
10144         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10145 #endif
10146 #ifdef TARGET_NR_munlockall
10147     case TARGET_NR_munlockall:
10148         return get_errno(munlockall());
10149 #endif
10150 #ifdef TARGET_NR_truncate
10151     case TARGET_NR_truncate:
10152         if (!(p = lock_user_string(arg1)))
10153             return -TARGET_EFAULT;
10154         ret = get_errno(truncate(p, arg2));
10155         unlock_user(p, arg1, 0);
10156         return ret;
10157 #endif
10158 #ifdef TARGET_NR_ftruncate
10159     case TARGET_NR_ftruncate:
10160         return get_errno(ftruncate(arg1, arg2));
10161 #endif
10162     case TARGET_NR_fchmod:
10163         return get_errno(fchmod(arg1, arg2));
10164 #if defined(TARGET_NR_fchmodat)
10165     case TARGET_NR_fchmodat:
10166         if (!(p = lock_user_string(arg2)))
10167             return -TARGET_EFAULT;
10168         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10169         unlock_user(p, arg2, 0);
10170         return ret;
10171 #endif
10172     case TARGET_NR_getpriority:
10173         /* Note that negative values are valid for getpriority, so we must
10174            differentiate based on errno settings.  */
10175         errno = 0;
10176         ret = getpriority(arg1, arg2);
10177         if (ret == -1 && errno != 0) {
10178             return -host_to_target_errno(errno);
10179         }
10180 #ifdef TARGET_ALPHA
10181         /* Return value is the unbiased priority.  Signal no error.  */
10182         cpu_env->ir[IR_V0] = 0;
10183 #else
10184         /* Return value is a biased priority to avoid negative numbers.  */
10185         ret = 20 - ret;
10186 #endif
10187         return ret;
10188     case TARGET_NR_setpriority:
10189         return get_errno(setpriority(arg1, arg2, arg3));
10190 #ifdef TARGET_NR_statfs
10191     case TARGET_NR_statfs:
10192         if (!(p = lock_user_string(arg1))) {
10193             return -TARGET_EFAULT;
10194         }
10195         ret = get_errno(statfs(path(p), &stfs));
10196         unlock_user(p, arg1, 0);
10197     convert_statfs:
10198         if (!is_error(ret)) {
10199             struct target_statfs *target_stfs;
10200 
10201             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10202                 return -TARGET_EFAULT;
10203             __put_user(stfs.f_type, &target_stfs->f_type);
10204             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10205             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10206             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10207             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10208             __put_user(stfs.f_files, &target_stfs->f_files);
10209             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10210             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10211             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10212             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10213             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10214 #ifdef _STATFS_F_FLAGS
10215             __put_user(stfs.f_flags, &target_stfs->f_flags);
10216 #else
10217             __put_user(0, &target_stfs->f_flags);
10218 #endif
10219             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10220             unlock_user_struct(target_stfs, arg2, 1);
10221         }
10222         return ret;
10223 #endif
10224 #ifdef TARGET_NR_fstatfs
10225     case TARGET_NR_fstatfs:
10226         ret = get_errno(fstatfs(arg1, &stfs));
10227         goto convert_statfs;
10228 #endif
10229 #ifdef TARGET_NR_statfs64
10230     case TARGET_NR_statfs64:
10231         if (!(p = lock_user_string(arg1))) {
10232             return -TARGET_EFAULT;
10233         }
10234         ret = get_errno(statfs(path(p), &stfs));
10235         unlock_user(p, arg1, 0);
10236     convert_statfs64:
10237         if (!is_error(ret)) {
10238             struct target_statfs64 *target_stfs;
10239 
10240             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10241                 return -TARGET_EFAULT;
10242             __put_user(stfs.f_type, &target_stfs->f_type);
10243             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10244             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10245             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10246             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10247             __put_user(stfs.f_files, &target_stfs->f_files);
10248             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10249             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10250             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10251             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10252             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10253 #ifdef _STATFS_F_FLAGS
10254             __put_user(stfs.f_flags, &target_stfs->f_flags);
10255 #else
10256             __put_user(0, &target_stfs->f_flags);
10257 #endif
10258             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10259             unlock_user_struct(target_stfs, arg3, 1);
10260         }
10261         return ret;
10262     case TARGET_NR_fstatfs64:
10263         ret = get_errno(fstatfs(arg1, &stfs));
10264         goto convert_statfs64;
10265 #endif
10266 #ifdef TARGET_NR_socketcall
10267     case TARGET_NR_socketcall:
10268         return do_socketcall(arg1, arg2);
10269 #endif
10270 #ifdef TARGET_NR_accept
10271     case TARGET_NR_accept:
10272         return do_accept4(arg1, arg2, arg3, 0);
10273 #endif
10274 #ifdef TARGET_NR_accept4
10275     case TARGET_NR_accept4:
10276         return do_accept4(arg1, arg2, arg3, arg4);
10277 #endif
10278 #ifdef TARGET_NR_bind
10279     case TARGET_NR_bind:
10280         return do_bind(arg1, arg2, arg3);
10281 #endif
10282 #ifdef TARGET_NR_connect
10283     case TARGET_NR_connect:
10284         return do_connect(arg1, arg2, arg3);
10285 #endif
10286 #ifdef TARGET_NR_getpeername
10287     case TARGET_NR_getpeername:
10288         return do_getpeername(arg1, arg2, arg3);
10289 #endif
10290 #ifdef TARGET_NR_getsockname
10291     case TARGET_NR_getsockname:
10292         return do_getsockname(arg1, arg2, arg3);
10293 #endif
10294 #ifdef TARGET_NR_getsockopt
10295     case TARGET_NR_getsockopt:
10296         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10297 #endif
10298 #ifdef TARGET_NR_listen
10299     case TARGET_NR_listen:
10300         return get_errno(listen(arg1, arg2));
10301 #endif
10302 #ifdef TARGET_NR_recv
10303     case TARGET_NR_recv:
10304         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10305 #endif
10306 #ifdef TARGET_NR_recvfrom
10307     case TARGET_NR_recvfrom:
10308         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10309 #endif
10310 #ifdef TARGET_NR_recvmsg
10311     case TARGET_NR_recvmsg:
10312         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10313 #endif
10314 #ifdef TARGET_NR_send
10315     case TARGET_NR_send:
10316         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10317 #endif
10318 #ifdef TARGET_NR_sendmsg
10319     case TARGET_NR_sendmsg:
10320         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10321 #endif
10322 #ifdef TARGET_NR_sendmmsg
10323     case TARGET_NR_sendmmsg:
10324         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10325 #endif
10326 #ifdef TARGET_NR_recvmmsg
10327     case TARGET_NR_recvmmsg:
10328         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10329 #endif
10330 #ifdef TARGET_NR_sendto
10331     case TARGET_NR_sendto:
10332         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10333 #endif
10334 #ifdef TARGET_NR_shutdown
10335     case TARGET_NR_shutdown:
10336         return get_errno(shutdown(arg1, arg2));
10337 #endif
10338 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10339     case TARGET_NR_getrandom:
10340         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10341         if (!p) {
10342             return -TARGET_EFAULT;
10343         }
10344         ret = get_errno(getrandom(p, arg2, arg3));
10345         unlock_user(p, arg1, ret);
10346         return ret;
10347 #endif
10348 #ifdef TARGET_NR_socket
10349     case TARGET_NR_socket:
10350         return do_socket(arg1, arg2, arg3);
10351 #endif
10352 #ifdef TARGET_NR_socketpair
10353     case TARGET_NR_socketpair:
10354         return do_socketpair(arg1, arg2, arg3, arg4);
10355 #endif
10356 #ifdef TARGET_NR_setsockopt
10357     case TARGET_NR_setsockopt:
10358         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10359 #endif
10360 #if defined(TARGET_NR_syslog)
10361     case TARGET_NR_syslog:
10362         {
10363             int len = arg2;
10364 
10365             switch (arg1) {
10366             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10367             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10368             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10369             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10370             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10371             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10372             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10373             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10374                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10375             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10376             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10377             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10378                 {
10379                     if (len < 0) {
10380                         return -TARGET_EINVAL;
10381                     }
10382                     if (len == 0) {
10383                         return 0;
10384                     }
10385                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10386                     if (!p) {
10387                         return -TARGET_EFAULT;
10388                     }
10389                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10390                     unlock_user(p, arg2, arg3);
10391                 }
10392                 return ret;
10393             default:
10394                 return -TARGET_EINVAL;
10395             }
10396         }
10397         break;
10398 #endif
10399     case TARGET_NR_setitimer:
10400         {
10401             struct itimerval value, ovalue, *pvalue;
10402 
10403             if (arg2) {
10404                 pvalue = &value;
10405                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10406                     || copy_from_user_timeval(&pvalue->it_value,
10407                                               arg2 + sizeof(struct target_timeval)))
10408                     return -TARGET_EFAULT;
10409             } else {
10410                 pvalue = NULL;
10411             }
10412             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10413             if (!is_error(ret) && arg3) {
10414                 if (copy_to_user_timeval(arg3,
10415                                          &ovalue.it_interval)
10416                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10417                                             &ovalue.it_value))
10418                     return -TARGET_EFAULT;
10419             }
10420         }
10421         return ret;
10422     case TARGET_NR_getitimer:
10423         {
10424             struct itimerval value;
10425 
10426             ret = get_errno(getitimer(arg1, &value));
10427             if (!is_error(ret) && arg2) {
10428                 if (copy_to_user_timeval(arg2,
10429                                          &value.it_interval)
10430                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10431                                             &value.it_value))
10432                     return -TARGET_EFAULT;
10433             }
10434         }
10435         return ret;
10436 #ifdef TARGET_NR_stat
10437     case TARGET_NR_stat:
10438         if (!(p = lock_user_string(arg1))) {
10439             return -TARGET_EFAULT;
10440         }
10441         ret = get_errno(stat(path(p), &st));
10442         unlock_user(p, arg1, 0);
10443         goto do_stat;
10444 #endif
10445 #ifdef TARGET_NR_lstat
10446     case TARGET_NR_lstat:
10447         if (!(p = lock_user_string(arg1))) {
10448             return -TARGET_EFAULT;
10449         }
10450         ret = get_errno(lstat(path(p), &st));
10451         unlock_user(p, arg1, 0);
10452         goto do_stat;
10453 #endif
10454 #ifdef TARGET_NR_fstat
10455     case TARGET_NR_fstat:
10456         {
10457             ret = get_errno(fstat(arg1, &st));
10458 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10459         do_stat:
10460 #endif
10461             if (!is_error(ret)) {
10462                 struct target_stat *target_st;
10463 
10464                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10465                     return -TARGET_EFAULT;
10466                 memset(target_st, 0, sizeof(*target_st));
10467                 __put_user(st.st_dev, &target_st->st_dev);
10468                 __put_user(st.st_ino, &target_st->st_ino);
10469                 __put_user(st.st_mode, &target_st->st_mode);
10470                 __put_user(st.st_uid, &target_st->st_uid);
10471                 __put_user(st.st_gid, &target_st->st_gid);
10472                 __put_user(st.st_nlink, &target_st->st_nlink);
10473                 __put_user(st.st_rdev, &target_st->st_rdev);
10474                 __put_user(st.st_size, &target_st->st_size);
10475                 __put_user(st.st_blksize, &target_st->st_blksize);
10476                 __put_user(st.st_blocks, &target_st->st_blocks);
10477                 __put_user(st.st_atime, &target_st->target_st_atime);
10478                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10479                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10480 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10481                 __put_user(st.st_atim.tv_nsec,
10482                            &target_st->target_st_atime_nsec);
10483                 __put_user(st.st_mtim.tv_nsec,
10484                            &target_st->target_st_mtime_nsec);
10485                 __put_user(st.st_ctim.tv_nsec,
10486                            &target_st->target_st_ctime_nsec);
10487 #endif
10488                 unlock_user_struct(target_st, arg2, 1);
10489             }
10490         }
10491         return ret;
10492 #endif
10493     case TARGET_NR_vhangup:
10494         return get_errno(vhangup());
10495 #ifdef TARGET_NR_syscall
10496     case TARGET_NR_syscall:
10497         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10498                           arg6, arg7, arg8, 0);
10499 #endif
10500 #if defined(TARGET_NR_wait4)
10501     case TARGET_NR_wait4:
10502         {
10503             int status;
10504             abi_long status_ptr = arg2;
10505             struct rusage rusage, *rusage_ptr;
10506             abi_ulong target_rusage = arg4;
10507             abi_long rusage_err;
10508             if (target_rusage)
10509                 rusage_ptr = &rusage;
10510             else
10511                 rusage_ptr = NULL;
10512             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10513             if (!is_error(ret)) {
10514                 if (status_ptr && ret) {
10515                     status = host_to_target_waitstatus(status);
10516                     if (put_user_s32(status, status_ptr))
10517                         return -TARGET_EFAULT;
10518                 }
10519                 if (target_rusage) {
10520                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10521                     if (rusage_err) {
10522                         ret = rusage_err;
10523                     }
10524                 }
10525             }
10526         }
10527         return ret;
10528 #endif
10529 #ifdef TARGET_NR_swapoff
10530     case TARGET_NR_swapoff:
10531         if (!(p = lock_user_string(arg1)))
10532             return -TARGET_EFAULT;
10533         ret = get_errno(swapoff(p));
10534         unlock_user(p, arg1, 0);
10535         return ret;
10536 #endif
10537     case TARGET_NR_sysinfo:
10538         {
10539             struct target_sysinfo *target_value;
10540             struct sysinfo value;
10541             ret = get_errno(sysinfo(&value));
10542             if (!is_error(ret) && arg1)
10543             {
10544                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10545                     return -TARGET_EFAULT;
10546                 __put_user(value.uptime, &target_value->uptime);
10547                 __put_user(value.loads[0], &target_value->loads[0]);
10548                 __put_user(value.loads[1], &target_value->loads[1]);
10549                 __put_user(value.loads[2], &target_value->loads[2]);
10550                 __put_user(value.totalram, &target_value->totalram);
10551                 __put_user(value.freeram, &target_value->freeram);
10552                 __put_user(value.sharedram, &target_value->sharedram);
10553                 __put_user(value.bufferram, &target_value->bufferram);
10554                 __put_user(value.totalswap, &target_value->totalswap);
10555                 __put_user(value.freeswap, &target_value->freeswap);
10556                 __put_user(value.procs, &target_value->procs);
10557                 __put_user(value.totalhigh, &target_value->totalhigh);
10558                 __put_user(value.freehigh, &target_value->freehigh);
10559                 __put_user(value.mem_unit, &target_value->mem_unit);
10560                 unlock_user_struct(target_value, arg1, 1);
10561             }
10562         }
10563         return ret;
10564 #ifdef TARGET_NR_ipc
10565     case TARGET_NR_ipc:
10566         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10567 #endif
10568 #ifdef TARGET_NR_semget
10569     case TARGET_NR_semget:
10570         return get_errno(semget(arg1, arg2, arg3));
10571 #endif
10572 #ifdef TARGET_NR_semop
10573     case TARGET_NR_semop:
10574         return do_semtimedop(arg1, arg2, arg3, 0, false);
10575 #endif
10576 #ifdef TARGET_NR_semtimedop
10577     case TARGET_NR_semtimedop:
10578         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10579 #endif
10580 #ifdef TARGET_NR_semtimedop_time64
10581     case TARGET_NR_semtimedop_time64:
10582         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10583 #endif
10584 #ifdef TARGET_NR_semctl
10585     case TARGET_NR_semctl:
10586         return do_semctl(arg1, arg2, arg3, arg4);
10587 #endif
10588 #ifdef TARGET_NR_msgctl
10589     case TARGET_NR_msgctl:
10590         return do_msgctl(arg1, arg2, arg3);
10591 #endif
10592 #ifdef TARGET_NR_msgget
10593     case TARGET_NR_msgget:
10594         return get_errno(msgget(arg1, arg2));
10595 #endif
10596 #ifdef TARGET_NR_msgrcv
10597     case TARGET_NR_msgrcv:
10598         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10599 #endif
10600 #ifdef TARGET_NR_msgsnd
10601     case TARGET_NR_msgsnd:
10602         return do_msgsnd(arg1, arg2, arg3, arg4);
10603 #endif
10604 #ifdef TARGET_NR_shmget
10605     case TARGET_NR_shmget:
10606         return get_errno(shmget(arg1, arg2, arg3));
10607 #endif
10608 #ifdef TARGET_NR_shmctl
10609     case TARGET_NR_shmctl:
10610         return do_shmctl(arg1, arg2, arg3);
10611 #endif
10612 #ifdef TARGET_NR_shmat
10613     case TARGET_NR_shmat:
10614         return do_shmat(cpu_env, arg1, arg2, arg3);
10615 #endif
10616 #ifdef TARGET_NR_shmdt
10617     case TARGET_NR_shmdt:
10618         return do_shmdt(arg1);
10619 #endif
10620     case TARGET_NR_fsync:
10621         return get_errno(fsync(arg1));
10622     case TARGET_NR_clone:
10623         /* Linux manages to have three different orderings for its
10624          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10625          * match the kernel's CONFIG_CLONE_* settings.
10626          * Microblaze is further special in that it uses a sixth
10627          * implicit argument to clone for the TLS pointer.
10628          */
10629 #if defined(TARGET_MICROBLAZE)
10630         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10631 #elif defined(TARGET_CLONE_BACKWARDS)
10632         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10633 #elif defined(TARGET_CLONE_BACKWARDS2)
10634         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10635 #else
10636         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10637 #endif
10638         return ret;
10639 #ifdef __NR_exit_group
10640         /* new thread calls */
10641     case TARGET_NR_exit_group:
10642         preexit_cleanup(cpu_env, arg1);
10643         return get_errno(exit_group(arg1));
10644 #endif
10645     case TARGET_NR_setdomainname:
10646         if (!(p = lock_user_string(arg1)))
10647             return -TARGET_EFAULT;
10648         ret = get_errno(setdomainname(p, arg2));
10649         unlock_user(p, arg1, 0);
10650         return ret;
10651     case TARGET_NR_uname:
10652         /* no need to transcode because we use the linux syscall */
10653         {
10654             struct new_utsname * buf;
10655 
10656             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10657                 return -TARGET_EFAULT;
10658             ret = get_errno(sys_uname(buf));
10659             if (!is_error(ret)) {
10660                 /* Overwrite the native machine name with whatever is being
10661                    emulated. */
10662                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10663                           sizeof(buf->machine));
10664                 /* Allow the user to override the reported release.  */
10665                 if (qemu_uname_release && *qemu_uname_release) {
10666                     g_strlcpy(buf->release, qemu_uname_release,
10667                               sizeof(buf->release));
10668                 }
10669             }
10670             unlock_user_struct(buf, arg1, 1);
10671         }
10672         return ret;
10673 #ifdef TARGET_I386
10674     case TARGET_NR_modify_ldt:
10675         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10676 #if !defined(TARGET_X86_64)
10677     case TARGET_NR_vm86:
10678         return do_vm86(cpu_env, arg1, arg2);
10679 #endif
10680 #endif
10681 #if defined(TARGET_NR_adjtimex)
10682     case TARGET_NR_adjtimex:
10683         {
10684             struct timex host_buf;
10685 
10686             if (target_to_host_timex(&host_buf, arg1) != 0) {
10687                 return -TARGET_EFAULT;
10688             }
10689             ret = get_errno(adjtimex(&host_buf));
10690             if (!is_error(ret)) {
10691                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10692                     return -TARGET_EFAULT;
10693                 }
10694             }
10695         }
10696         return ret;
10697 #endif
10698 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10699     case TARGET_NR_clock_adjtime:
10700         {
10701             struct timex htx, *phtx = &htx;
10702 
10703             if (target_to_host_timex(phtx, arg2) != 0) {
10704                 return -TARGET_EFAULT;
10705             }
10706             ret = get_errno(clock_adjtime(arg1, phtx));
10707             if (!is_error(ret) && phtx) {
10708                 if (host_to_target_timex(arg2, phtx) != 0) {
10709                     return -TARGET_EFAULT;
10710                 }
10711             }
10712         }
10713         return ret;
10714 #endif
10715 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10716     case TARGET_NR_clock_adjtime64:
10717         {
10718             struct timex htx;
10719 
10720             if (target_to_host_timex64(&htx, arg2) != 0) {
10721                 return -TARGET_EFAULT;
10722             }
10723             ret = get_errno(clock_adjtime(arg1, &htx));
10724             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10725                     return -TARGET_EFAULT;
10726             }
10727         }
10728         return ret;
10729 #endif
10730     case TARGET_NR_getpgid:
10731         return get_errno(getpgid(arg1));
10732     case TARGET_NR_fchdir:
10733         return get_errno(fchdir(arg1));
10734     case TARGET_NR_personality:
10735         return get_errno(personality(arg1));
10736 #ifdef TARGET_NR__llseek /* Not on alpha */
10737     case TARGET_NR__llseek:
10738         {
10739             int64_t res;
10740 #if !defined(__NR_llseek)
10741             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10742             if (res == -1) {
10743                 ret = get_errno(res);
10744             } else {
10745                 ret = 0;
10746             }
10747 #else
10748             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10749 #endif
10750             if ((ret == 0) && put_user_s64(res, arg4)) {
10751                 return -TARGET_EFAULT;
10752             }
10753         }
10754         return ret;
10755 #endif
10756 #ifdef TARGET_NR_getdents
10757     case TARGET_NR_getdents:
10758         return do_getdents(arg1, arg2, arg3);
10759 #endif /* TARGET_NR_getdents */
10760 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10761     case TARGET_NR_getdents64:
10762         return do_getdents64(arg1, arg2, arg3);
10763 #endif /* TARGET_NR_getdents64 */
10764 #if defined(TARGET_NR__newselect)
10765     case TARGET_NR__newselect:
10766         return do_select(arg1, arg2, arg3, arg4, arg5);
10767 #endif
10768 #ifdef TARGET_NR_poll
10769     case TARGET_NR_poll:
10770         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10771 #endif
10772 #ifdef TARGET_NR_ppoll
10773     case TARGET_NR_ppoll:
10774         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10775 #endif
10776 #ifdef TARGET_NR_ppoll_time64
10777     case TARGET_NR_ppoll_time64:
10778         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10779 #endif
10780     case TARGET_NR_flock:
10781         /* NOTE: the flock constant seems to be the same for every
10782            Linux platform */
10783         return get_errno(safe_flock(arg1, arg2));
10784     case TARGET_NR_readv:
10785         {
10786             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10787             if (vec != NULL) {
10788                 ret = get_errno(safe_readv(arg1, vec, arg3));
10789                 unlock_iovec(vec, arg2, arg3, 1);
10790             } else {
10791                 ret = -host_to_target_errno(errno);
10792             }
10793         }
10794         return ret;
10795     case TARGET_NR_writev:
10796         {
10797             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10798             if (vec != NULL) {
10799                 ret = get_errno(safe_writev(arg1, vec, arg3));
10800                 unlock_iovec(vec, arg2, arg3, 0);
10801             } else {
10802                 ret = -host_to_target_errno(errno);
10803             }
10804         }
10805         return ret;
10806 #if defined(TARGET_NR_preadv)
10807     case TARGET_NR_preadv:
10808         {
10809             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10810             if (vec != NULL) {
10811                 unsigned long low, high;
10812 
10813                 target_to_host_low_high(arg4, arg5, &low, &high);
10814                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10815                 unlock_iovec(vec, arg2, arg3, 1);
10816             } else {
10817                 ret = -host_to_target_errno(errno);
10818            }
10819         }
10820         return ret;
10821 #endif
10822 #if defined(TARGET_NR_pwritev)
10823     case TARGET_NR_pwritev:
10824         {
10825             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10826             if (vec != NULL) {
10827                 unsigned long low, high;
10828 
10829                 target_to_host_low_high(arg4, arg5, &low, &high);
10830                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10831                 unlock_iovec(vec, arg2, arg3, 0);
10832             } else {
10833                 ret = -host_to_target_errno(errno);
10834            }
10835         }
10836         return ret;
10837 #endif
10838     case TARGET_NR_getsid:
10839         return get_errno(getsid(arg1));
10840 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10841     case TARGET_NR_fdatasync:
10842         return get_errno(fdatasync(arg1));
10843 #endif
10844     case TARGET_NR_sched_getaffinity:
10845         {
10846             unsigned int mask_size;
10847             unsigned long *mask;
10848 
10849             /*
10850              * sched_getaffinity needs multiples of ulong, so need to take
10851              * care of mismatches between target ulong and host ulong sizes.
10852              */
10853             if (arg2 & (sizeof(abi_ulong) - 1)) {
10854                 return -TARGET_EINVAL;
10855             }
10856             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10857 
10858             mask = alloca(mask_size);
10859             memset(mask, 0, mask_size);
10860             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10861 
10862             if (!is_error(ret)) {
10863                 if (ret > arg2) {
10864                     /* More data returned than the caller's buffer will fit.
10865                      * This only happens if sizeof(abi_long) < sizeof(long)
10866                      * and the caller passed us a buffer holding an odd number
10867                      * of abi_longs. If the host kernel is actually using the
10868                      * extra 4 bytes then fail EINVAL; otherwise we can just
10869                      * ignore them and only copy the interesting part.
10870                      */
10871                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10872                     if (numcpus > arg2 * 8) {
10873                         return -TARGET_EINVAL;
10874                     }
10875                     ret = arg2;
10876                 }
10877 
10878                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10879                     return -TARGET_EFAULT;
10880                 }
10881             }
10882         }
10883         return ret;
10884     case TARGET_NR_sched_setaffinity:
10885         {
10886             unsigned int mask_size;
10887             unsigned long *mask;
10888 
10889             /*
10890              * sched_setaffinity needs multiples of ulong, so need to take
10891              * care of mismatches between target ulong and host ulong sizes.
10892              */
10893             if (arg2 & (sizeof(abi_ulong) - 1)) {
10894                 return -TARGET_EINVAL;
10895             }
10896             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10897             mask = alloca(mask_size);
10898 
10899             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10900             if (ret) {
10901                 return ret;
10902             }
10903 
10904             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10905         }
10906     case TARGET_NR_getcpu:
10907         {
10908             unsigned cpu, node;
10909             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10910                                        arg2 ? &node : NULL,
10911                                        NULL));
10912             if (is_error(ret)) {
10913                 return ret;
10914             }
10915             if (arg1 && put_user_u32(cpu, arg1)) {
10916                 return -TARGET_EFAULT;
10917             }
10918             if (arg2 && put_user_u32(node, arg2)) {
10919                 return -TARGET_EFAULT;
10920             }
10921         }
10922         return ret;
10923     case TARGET_NR_sched_setparam:
10924         {
10925             struct target_sched_param *target_schp;
10926             struct sched_param schp;
10927 
10928             if (arg2 == 0) {
10929                 return -TARGET_EINVAL;
10930             }
10931             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10932                 return -TARGET_EFAULT;
10933             }
10934             schp.sched_priority = tswap32(target_schp->sched_priority);
10935             unlock_user_struct(target_schp, arg2, 0);
10936             return get_errno(sys_sched_setparam(arg1, &schp));
10937         }
10938     case TARGET_NR_sched_getparam:
10939         {
10940             struct target_sched_param *target_schp;
10941             struct sched_param schp;
10942 
10943             if (arg2 == 0) {
10944                 return -TARGET_EINVAL;
10945             }
10946             ret = get_errno(sys_sched_getparam(arg1, &schp));
10947             if (!is_error(ret)) {
10948                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10949                     return -TARGET_EFAULT;
10950                 }
10951                 target_schp->sched_priority = tswap32(schp.sched_priority);
10952                 unlock_user_struct(target_schp, arg2, 1);
10953             }
10954         }
10955         return ret;
10956     case TARGET_NR_sched_setscheduler:
10957         {
10958             struct target_sched_param *target_schp;
10959             struct sched_param schp;
10960             if (arg3 == 0) {
10961                 return -TARGET_EINVAL;
10962             }
10963             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10964                 return -TARGET_EFAULT;
10965             }
10966             schp.sched_priority = tswap32(target_schp->sched_priority);
10967             unlock_user_struct(target_schp, arg3, 0);
10968             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10969         }
10970     case TARGET_NR_sched_getscheduler:
10971         return get_errno(sys_sched_getscheduler(arg1));
10972     case TARGET_NR_sched_getattr:
10973         {
10974             struct target_sched_attr *target_scha;
10975             struct sched_attr scha;
10976             if (arg2 == 0) {
10977                 return -TARGET_EINVAL;
10978             }
10979             if (arg3 > sizeof(scha)) {
10980                 arg3 = sizeof(scha);
10981             }
10982             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10983             if (!is_error(ret)) {
10984                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10985                 if (!target_scha) {
10986                     return -TARGET_EFAULT;
10987                 }
10988                 target_scha->size = tswap32(scha.size);
10989                 target_scha->sched_policy = tswap32(scha.sched_policy);
10990                 target_scha->sched_flags = tswap64(scha.sched_flags);
10991                 target_scha->sched_nice = tswap32(scha.sched_nice);
10992                 target_scha->sched_priority = tswap32(scha.sched_priority);
10993                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10994                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10995                 target_scha->sched_period = tswap64(scha.sched_period);
10996                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10997                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10998                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10999                 }
11000                 unlock_user(target_scha, arg2, arg3);
11001             }
11002             return ret;
11003         }
11004     case TARGET_NR_sched_setattr:
11005         {
11006             struct target_sched_attr *target_scha;
11007             struct sched_attr scha;
11008             uint32_t size;
11009             int zeroed;
11010             if (arg2 == 0) {
11011                 return -TARGET_EINVAL;
11012             }
11013             if (get_user_u32(size, arg2)) {
11014                 return -TARGET_EFAULT;
11015             }
11016             if (!size) {
11017                 size = offsetof(struct target_sched_attr, sched_util_min);
11018             }
11019             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11020                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11021                     return -TARGET_EFAULT;
11022                 }
11023                 return -TARGET_E2BIG;
11024             }
11025 
11026             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11027             if (zeroed < 0) {
11028                 return zeroed;
11029             } else if (zeroed == 0) {
11030                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11031                     return -TARGET_EFAULT;
11032                 }
11033                 return -TARGET_E2BIG;
11034             }
11035             if (size > sizeof(struct target_sched_attr)) {
11036                 size = sizeof(struct target_sched_attr);
11037             }
11038 
11039             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11040             if (!target_scha) {
11041                 return -TARGET_EFAULT;
11042             }
11043             scha.size = size;
11044             scha.sched_policy = tswap32(target_scha->sched_policy);
11045             scha.sched_flags = tswap64(target_scha->sched_flags);
11046             scha.sched_nice = tswap32(target_scha->sched_nice);
11047             scha.sched_priority = tswap32(target_scha->sched_priority);
11048             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11049             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11050             scha.sched_period = tswap64(target_scha->sched_period);
11051             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11052                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11053                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11054             }
11055             unlock_user(target_scha, arg2, 0);
11056             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11057         }
11058     case TARGET_NR_sched_yield:
11059         return get_errno(sched_yield());
11060     case TARGET_NR_sched_get_priority_max:
11061         return get_errno(sched_get_priority_max(arg1));
11062     case TARGET_NR_sched_get_priority_min:
11063         return get_errno(sched_get_priority_min(arg1));
11064 #ifdef TARGET_NR_sched_rr_get_interval
11065     case TARGET_NR_sched_rr_get_interval:
11066         {
11067             struct timespec ts;
11068             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11069             if (!is_error(ret)) {
11070                 ret = host_to_target_timespec(arg2, &ts);
11071             }
11072         }
11073         return ret;
11074 #endif
11075 #ifdef TARGET_NR_sched_rr_get_interval_time64
11076     case TARGET_NR_sched_rr_get_interval_time64:
11077         {
11078             struct timespec ts;
11079             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11080             if (!is_error(ret)) {
11081                 ret = host_to_target_timespec64(arg2, &ts);
11082             }
11083         }
11084         return ret;
11085 #endif
11086 #if defined(TARGET_NR_nanosleep)
11087     case TARGET_NR_nanosleep:
11088         {
11089             struct timespec req, rem;
11090             target_to_host_timespec(&req, arg1);
11091             ret = get_errno(safe_nanosleep(&req, &rem));
11092             if (is_error(ret) && arg2) {
11093                 host_to_target_timespec(arg2, &rem);
11094             }
11095         }
11096         return ret;
11097 #endif
11098     case TARGET_NR_prctl:
11099         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11100         break;
11101 #ifdef TARGET_NR_arch_prctl
11102     case TARGET_NR_arch_prctl:
11103         return do_arch_prctl(cpu_env, arg1, arg2);
11104 #endif
11105 #ifdef TARGET_NR_pread64
11106     case TARGET_NR_pread64:
11107         if (regpairs_aligned(cpu_env, num)) {
11108             arg4 = arg5;
11109             arg5 = arg6;
11110         }
11111         if (arg2 == 0 && arg3 == 0) {
11112             /* Special-case NULL buffer and zero length, which should succeed */
11113             p = 0;
11114         } else {
11115             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11116             if (!p) {
11117                 return -TARGET_EFAULT;
11118             }
11119         }
11120         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11121         unlock_user(p, arg2, ret);
11122         return ret;
11123     case TARGET_NR_pwrite64:
11124         if (regpairs_aligned(cpu_env, num)) {
11125             arg4 = arg5;
11126             arg5 = arg6;
11127         }
11128         if (arg2 == 0 && arg3 == 0) {
11129             /* Special-case NULL buffer and zero length, which should succeed */
11130             p = 0;
11131         } else {
11132             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11133             if (!p) {
11134                 return -TARGET_EFAULT;
11135             }
11136         }
11137         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11138         unlock_user(p, arg2, 0);
11139         return ret;
11140 #endif
11141     case TARGET_NR_getcwd:
11142         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11143             return -TARGET_EFAULT;
11144         ret = get_errno(sys_getcwd1(p, arg2));
11145         unlock_user(p, arg1, ret);
11146         return ret;
11147     case TARGET_NR_capget:
11148     case TARGET_NR_capset:
11149     {
11150         struct target_user_cap_header *target_header;
11151         struct target_user_cap_data *target_data = NULL;
11152         struct __user_cap_header_struct header;
11153         struct __user_cap_data_struct data[2];
11154         struct __user_cap_data_struct *dataptr = NULL;
11155         int i, target_datalen;
11156         int data_items = 1;
11157 
11158         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11159             return -TARGET_EFAULT;
11160         }
11161         header.version = tswap32(target_header->version);
11162         header.pid = tswap32(target_header->pid);
11163 
11164         if (header.version != _LINUX_CAPABILITY_VERSION) {
11165             /* Version 2 and up takes pointer to two user_data structs */
11166             data_items = 2;
11167         }
11168 
11169         target_datalen = sizeof(*target_data) * data_items;
11170 
11171         if (arg2) {
11172             if (num == TARGET_NR_capget) {
11173                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11174             } else {
11175                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11176             }
11177             if (!target_data) {
11178                 unlock_user_struct(target_header, arg1, 0);
11179                 return -TARGET_EFAULT;
11180             }
11181 
11182             if (num == TARGET_NR_capset) {
11183                 for (i = 0; i < data_items; i++) {
11184                     data[i].effective = tswap32(target_data[i].effective);
11185                     data[i].permitted = tswap32(target_data[i].permitted);
11186                     data[i].inheritable = tswap32(target_data[i].inheritable);
11187                 }
11188             }
11189 
11190             dataptr = data;
11191         }
11192 
11193         if (num == TARGET_NR_capget) {
11194             ret = get_errno(capget(&header, dataptr));
11195         } else {
11196             ret = get_errno(capset(&header, dataptr));
11197         }
11198 
11199         /* The kernel always updates version for both capget and capset */
11200         target_header->version = tswap32(header.version);
11201         unlock_user_struct(target_header, arg1, 1);
11202 
11203         if (arg2) {
11204             if (num == TARGET_NR_capget) {
11205                 for (i = 0; i < data_items; i++) {
11206                     target_data[i].effective = tswap32(data[i].effective);
11207                     target_data[i].permitted = tswap32(data[i].permitted);
11208                     target_data[i].inheritable = tswap32(data[i].inheritable);
11209                 }
11210                 unlock_user(target_data, arg2, target_datalen);
11211             } else {
11212                 unlock_user(target_data, arg2, 0);
11213             }
11214         }
11215         return ret;
11216     }
11217     case TARGET_NR_sigaltstack:
11218         return do_sigaltstack(arg1, arg2, cpu_env);
11219 
11220 #ifdef CONFIG_SENDFILE
11221 #ifdef TARGET_NR_sendfile
11222     case TARGET_NR_sendfile:
11223     {
11224         off_t *offp = NULL;
11225         off_t off;
11226         if (arg3) {
11227             ret = get_user_sal(off, arg3);
11228             if (is_error(ret)) {
11229                 return ret;
11230             }
11231             offp = &off;
11232         }
11233         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11234         if (!is_error(ret) && arg3) {
11235             abi_long ret2 = put_user_sal(off, arg3);
11236             if (is_error(ret2)) {
11237                 ret = ret2;
11238             }
11239         }
11240         return ret;
11241     }
11242 #endif
11243 #ifdef TARGET_NR_sendfile64
11244     case TARGET_NR_sendfile64:
11245     {
11246         off_t *offp = NULL;
11247         off_t off;
11248         if (arg3) {
11249             ret = get_user_s64(off, arg3);
11250             if (is_error(ret)) {
11251                 return ret;
11252             }
11253             offp = &off;
11254         }
11255         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11256         if (!is_error(ret) && arg3) {
11257             abi_long ret2 = put_user_s64(off, arg3);
11258             if (is_error(ret2)) {
11259                 ret = ret2;
11260             }
11261         }
11262         return ret;
11263     }
11264 #endif
11265 #endif
11266 #ifdef TARGET_NR_vfork
11267     case TARGET_NR_vfork:
11268         return get_errno(do_fork(cpu_env,
11269                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11270                          0, 0, 0, 0));
11271 #endif
11272 #ifdef TARGET_NR_ugetrlimit
11273     case TARGET_NR_ugetrlimit:
11274     {
11275 	struct rlimit rlim;
11276 	int resource = target_to_host_resource(arg1);
11277 	ret = get_errno(getrlimit(resource, &rlim));
11278 	if (!is_error(ret)) {
11279 	    struct target_rlimit *target_rlim;
11280             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11281                 return -TARGET_EFAULT;
11282 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11283 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11284             unlock_user_struct(target_rlim, arg2, 1);
11285 	}
11286         return ret;
11287     }
11288 #endif
11289 #ifdef TARGET_NR_truncate64
11290     case TARGET_NR_truncate64:
11291         if (!(p = lock_user_string(arg1)))
11292             return -TARGET_EFAULT;
11293 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11294         unlock_user(p, arg1, 0);
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_ftruncate64
11298     case TARGET_NR_ftruncate64:
11299         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11300 #endif
11301 #ifdef TARGET_NR_stat64
11302     case TARGET_NR_stat64:
11303         if (!(p = lock_user_string(arg1))) {
11304             return -TARGET_EFAULT;
11305         }
11306         ret = get_errno(stat(path(p), &st));
11307         unlock_user(p, arg1, 0);
11308         if (!is_error(ret))
11309             ret = host_to_target_stat64(cpu_env, arg2, &st);
11310         return ret;
11311 #endif
11312 #ifdef TARGET_NR_lstat64
11313     case TARGET_NR_lstat64:
11314         if (!(p = lock_user_string(arg1))) {
11315             return -TARGET_EFAULT;
11316         }
11317         ret = get_errno(lstat(path(p), &st));
11318         unlock_user(p, arg1, 0);
11319         if (!is_error(ret))
11320             ret = host_to_target_stat64(cpu_env, arg2, &st);
11321         return ret;
11322 #endif
11323 #ifdef TARGET_NR_fstat64
11324     case TARGET_NR_fstat64:
11325         ret = get_errno(fstat(arg1, &st));
11326         if (!is_error(ret))
11327             ret = host_to_target_stat64(cpu_env, arg2, &st);
11328         return ret;
11329 #endif
11330 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11331 #ifdef TARGET_NR_fstatat64
11332     case TARGET_NR_fstatat64:
11333 #endif
11334 #ifdef TARGET_NR_newfstatat
11335     case TARGET_NR_newfstatat:
11336 #endif
11337         if (!(p = lock_user_string(arg2))) {
11338             return -TARGET_EFAULT;
11339         }
11340         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11341         unlock_user(p, arg2, 0);
11342         if (!is_error(ret))
11343             ret = host_to_target_stat64(cpu_env, arg3, &st);
11344         return ret;
11345 #endif
11346 #if defined(TARGET_NR_statx)
11347     case TARGET_NR_statx:
11348         {
11349             struct target_statx *target_stx;
11350             int dirfd = arg1;
11351             int flags = arg3;
11352 
11353             p = lock_user_string(arg2);
11354             if (p == NULL) {
11355                 return -TARGET_EFAULT;
11356             }
11357 #if defined(__NR_statx)
11358             {
11359                 /*
11360                  * It is assumed that struct statx is architecture independent.
11361                  */
11362                 struct target_statx host_stx;
11363                 int mask = arg4;
11364 
11365                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11366                 if (!is_error(ret)) {
11367                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11368                         unlock_user(p, arg2, 0);
11369                         return -TARGET_EFAULT;
11370                     }
11371                 }
11372 
11373                 if (ret != -TARGET_ENOSYS) {
11374                     unlock_user(p, arg2, 0);
11375                     return ret;
11376                 }
11377             }
11378 #endif
11379             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11380             unlock_user(p, arg2, 0);
11381 
11382             if (!is_error(ret)) {
11383                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11384                     return -TARGET_EFAULT;
11385                 }
11386                 memset(target_stx, 0, sizeof(*target_stx));
11387                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11388                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11389                 __put_user(st.st_ino, &target_stx->stx_ino);
11390                 __put_user(st.st_mode, &target_stx->stx_mode);
11391                 __put_user(st.st_uid, &target_stx->stx_uid);
11392                 __put_user(st.st_gid, &target_stx->stx_gid);
11393                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11394                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11395                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11396                 __put_user(st.st_size, &target_stx->stx_size);
11397                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11398                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11399                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11400                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11401                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11402                 unlock_user_struct(target_stx, arg5, 1);
11403             }
11404         }
11405         return ret;
11406 #endif
11407 #ifdef TARGET_NR_lchown
11408     case TARGET_NR_lchown:
11409         if (!(p = lock_user_string(arg1)))
11410             return -TARGET_EFAULT;
11411         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11412         unlock_user(p, arg1, 0);
11413         return ret;
11414 #endif
11415 #ifdef TARGET_NR_getuid
11416     case TARGET_NR_getuid:
11417         return get_errno(high2lowuid(getuid()));
11418 #endif
11419 #ifdef TARGET_NR_getgid
11420     case TARGET_NR_getgid:
11421         return get_errno(high2lowgid(getgid()));
11422 #endif
11423 #ifdef TARGET_NR_geteuid
11424     case TARGET_NR_geteuid:
11425         return get_errno(high2lowuid(geteuid()));
11426 #endif
11427 #ifdef TARGET_NR_getegid
11428     case TARGET_NR_getegid:
11429         return get_errno(high2lowgid(getegid()));
11430 #endif
11431     case TARGET_NR_setreuid:
11432         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11433     case TARGET_NR_setregid:
11434         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11435     case TARGET_NR_getgroups:
11436         {
11437             int gidsetsize = arg1;
11438             target_id *target_grouplist;
11439             gid_t *grouplist;
11440             int i;
11441 
11442             grouplist = alloca(gidsetsize * sizeof(gid_t));
11443             ret = get_errno(getgroups(gidsetsize, grouplist));
11444             if (gidsetsize == 0)
11445                 return ret;
11446             if (!is_error(ret)) {
11447                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11448                 if (!target_grouplist)
11449                     return -TARGET_EFAULT;
11450                 for(i = 0;i < ret; i++)
11451                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11452                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11453             }
11454         }
11455         return ret;
11456     case TARGET_NR_setgroups:
11457         {
11458             int gidsetsize = arg1;
11459             target_id *target_grouplist;
11460             gid_t *grouplist = NULL;
11461             int i;
11462             if (gidsetsize) {
11463                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11464                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11465                 if (!target_grouplist) {
11466                     return -TARGET_EFAULT;
11467                 }
11468                 for (i = 0; i < gidsetsize; i++) {
11469                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11470                 }
11471                 unlock_user(target_grouplist, arg2, 0);
11472             }
11473             return get_errno(setgroups(gidsetsize, grouplist));
11474         }
11475     case TARGET_NR_fchown:
11476         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11477 #if defined(TARGET_NR_fchownat)
11478     case TARGET_NR_fchownat:
11479         if (!(p = lock_user_string(arg2)))
11480             return -TARGET_EFAULT;
11481         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11482                                  low2highgid(arg4), arg5));
11483         unlock_user(p, arg2, 0);
11484         return ret;
11485 #endif
11486 #ifdef TARGET_NR_setresuid
11487     case TARGET_NR_setresuid:
11488         return get_errno(sys_setresuid(low2highuid(arg1),
11489                                        low2highuid(arg2),
11490                                        low2highuid(arg3)));
11491 #endif
11492 #ifdef TARGET_NR_getresuid
11493     case TARGET_NR_getresuid:
11494         {
11495             uid_t ruid, euid, suid;
11496             ret = get_errno(getresuid(&ruid, &euid, &suid));
11497             if (!is_error(ret)) {
11498                 if (put_user_id(high2lowuid(ruid), arg1)
11499                     || put_user_id(high2lowuid(euid), arg2)
11500                     || put_user_id(high2lowuid(suid), arg3))
11501                     return -TARGET_EFAULT;
11502             }
11503         }
11504         return ret;
11505 #endif
11506 #ifdef TARGET_NR_getresgid
11507     case TARGET_NR_setresgid:
11508         return get_errno(sys_setresgid(low2highgid(arg1),
11509                                        low2highgid(arg2),
11510                                        low2highgid(arg3)));
11511 #endif
11512 #ifdef TARGET_NR_getresgid
11513     case TARGET_NR_getresgid:
11514         {
11515             gid_t rgid, egid, sgid;
11516             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11517             if (!is_error(ret)) {
11518                 if (put_user_id(high2lowgid(rgid), arg1)
11519                     || put_user_id(high2lowgid(egid), arg2)
11520                     || put_user_id(high2lowgid(sgid), arg3))
11521                     return -TARGET_EFAULT;
11522             }
11523         }
11524         return ret;
11525 #endif
11526 #ifdef TARGET_NR_chown
11527     case TARGET_NR_chown:
11528         if (!(p = lock_user_string(arg1)))
11529             return -TARGET_EFAULT;
11530         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11531         unlock_user(p, arg1, 0);
11532         return ret;
11533 #endif
11534     case TARGET_NR_setuid:
11535         return get_errno(sys_setuid(low2highuid(arg1)));
11536     case TARGET_NR_setgid:
11537         return get_errno(sys_setgid(low2highgid(arg1)));
11538     case TARGET_NR_setfsuid:
11539         return get_errno(setfsuid(arg1));
11540     case TARGET_NR_setfsgid:
11541         return get_errno(setfsgid(arg1));
11542 
11543 #ifdef TARGET_NR_lchown32
11544     case TARGET_NR_lchown32:
11545         if (!(p = lock_user_string(arg1)))
11546             return -TARGET_EFAULT;
11547         ret = get_errno(lchown(p, arg2, arg3));
11548         unlock_user(p, arg1, 0);
11549         return ret;
11550 #endif
11551 #ifdef TARGET_NR_getuid32
11552     case TARGET_NR_getuid32:
11553         return get_errno(getuid());
11554 #endif
11555 
11556 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11557    /* Alpha specific */
11558     case TARGET_NR_getxuid:
11559          {
11560             uid_t euid;
11561             euid=geteuid();
11562             cpu_env->ir[IR_A4]=euid;
11563          }
11564         return get_errno(getuid());
11565 #endif
11566 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11567    /* Alpha specific */
11568     case TARGET_NR_getxgid:
11569          {
11570             uid_t egid;
11571             egid=getegid();
11572             cpu_env->ir[IR_A4]=egid;
11573          }
11574         return get_errno(getgid());
11575 #endif
11576 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11577     /* Alpha specific */
11578     case TARGET_NR_osf_getsysinfo:
11579         ret = -TARGET_EOPNOTSUPP;
11580         switch (arg1) {
11581           case TARGET_GSI_IEEE_FP_CONTROL:
11582             {
11583                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11584                 uint64_t swcr = cpu_env->swcr;
11585 
11586                 swcr &= ~SWCR_STATUS_MASK;
11587                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11588 
11589                 if (put_user_u64 (swcr, arg2))
11590                         return -TARGET_EFAULT;
11591                 ret = 0;
11592             }
11593             break;
11594 
11595           /* case GSI_IEEE_STATE_AT_SIGNAL:
11596              -- Not implemented in linux kernel.
11597              case GSI_UACPROC:
11598              -- Retrieves current unaligned access state; not much used.
11599              case GSI_PROC_TYPE:
11600              -- Retrieves implver information; surely not used.
11601              case GSI_GET_HWRPB:
11602              -- Grabs a copy of the HWRPB; surely not used.
11603           */
11604         }
11605         return ret;
11606 #endif
11607 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11608     /* Alpha specific */
11609     case TARGET_NR_osf_setsysinfo:
11610         ret = -TARGET_EOPNOTSUPP;
11611         switch (arg1) {
11612           case TARGET_SSI_IEEE_FP_CONTROL:
11613             {
11614                 uint64_t swcr, fpcr;
11615 
11616                 if (get_user_u64 (swcr, arg2)) {
11617                     return -TARGET_EFAULT;
11618                 }
11619 
11620                 /*
11621                  * The kernel calls swcr_update_status to update the
11622                  * status bits from the fpcr at every point that it
11623                  * could be queried.  Therefore, we store the status
11624                  * bits only in FPCR.
11625                  */
11626                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11627 
11628                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11629                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11630                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11631                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11632                 ret = 0;
11633             }
11634             break;
11635 
11636           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11637             {
11638                 uint64_t exc, fpcr, fex;
11639 
11640                 if (get_user_u64(exc, arg2)) {
11641                     return -TARGET_EFAULT;
11642                 }
11643                 exc &= SWCR_STATUS_MASK;
11644                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11645 
11646                 /* Old exceptions are not signaled.  */
11647                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11648                 fex = exc & ~fex;
11649                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11650                 fex &= (cpu_env)->swcr;
11651 
11652                 /* Update the hardware fpcr.  */
11653                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11654                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11655 
11656                 if (fex) {
11657                     int si_code = TARGET_FPE_FLTUNK;
11658                     target_siginfo_t info;
11659 
11660                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11661                         si_code = TARGET_FPE_FLTUND;
11662                     }
11663                     if (fex & SWCR_TRAP_ENABLE_INE) {
11664                         si_code = TARGET_FPE_FLTRES;
11665                     }
11666                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11667                         si_code = TARGET_FPE_FLTUND;
11668                     }
11669                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11670                         si_code = TARGET_FPE_FLTOVF;
11671                     }
11672                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11673                         si_code = TARGET_FPE_FLTDIV;
11674                     }
11675                     if (fex & SWCR_TRAP_ENABLE_INV) {
11676                         si_code = TARGET_FPE_FLTINV;
11677                     }
11678 
11679                     info.si_signo = SIGFPE;
11680                     info.si_errno = 0;
11681                     info.si_code = si_code;
11682                     info._sifields._sigfault._addr = (cpu_env)->pc;
11683                     queue_signal(cpu_env, info.si_signo,
11684                                  QEMU_SI_FAULT, &info);
11685                 }
11686                 ret = 0;
11687             }
11688             break;
11689 
11690           /* case SSI_NVPAIRS:
11691              -- Used with SSIN_UACPROC to enable unaligned accesses.
11692              case SSI_IEEE_STATE_AT_SIGNAL:
11693              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11694              -- Not implemented in linux kernel
11695           */
11696         }
11697         return ret;
11698 #endif
11699 #ifdef TARGET_NR_osf_sigprocmask
11700     /* Alpha specific.  */
11701     case TARGET_NR_osf_sigprocmask:
11702         {
11703             abi_ulong mask;
11704             int how;
11705             sigset_t set, oldset;
11706 
11707             switch(arg1) {
11708             case TARGET_SIG_BLOCK:
11709                 how = SIG_BLOCK;
11710                 break;
11711             case TARGET_SIG_UNBLOCK:
11712                 how = SIG_UNBLOCK;
11713                 break;
11714             case TARGET_SIG_SETMASK:
11715                 how = SIG_SETMASK;
11716                 break;
11717             default:
11718                 return -TARGET_EINVAL;
11719             }
11720             mask = arg2;
11721             target_to_host_old_sigset(&set, &mask);
11722             ret = do_sigprocmask(how, &set, &oldset);
11723             if (!ret) {
11724                 host_to_target_old_sigset(&mask, &oldset);
11725                 ret = mask;
11726             }
11727         }
11728         return ret;
11729 #endif
11730 
11731 #ifdef TARGET_NR_getgid32
11732     case TARGET_NR_getgid32:
11733         return get_errno(getgid());
11734 #endif
11735 #ifdef TARGET_NR_geteuid32
11736     case TARGET_NR_geteuid32:
11737         return get_errno(geteuid());
11738 #endif
11739 #ifdef TARGET_NR_getegid32
11740     case TARGET_NR_getegid32:
11741         return get_errno(getegid());
11742 #endif
11743 #ifdef TARGET_NR_setreuid32
11744     case TARGET_NR_setreuid32:
11745         return get_errno(setreuid(arg1, arg2));
11746 #endif
11747 #ifdef TARGET_NR_setregid32
11748     case TARGET_NR_setregid32:
11749         return get_errno(setregid(arg1, arg2));
11750 #endif
11751 #ifdef TARGET_NR_getgroups32
11752     case TARGET_NR_getgroups32:
11753         {
11754             int gidsetsize = arg1;
11755             uint32_t *target_grouplist;
11756             gid_t *grouplist;
11757             int i;
11758 
11759             grouplist = alloca(gidsetsize * sizeof(gid_t));
11760             ret = get_errno(getgroups(gidsetsize, grouplist));
11761             if (gidsetsize == 0)
11762                 return ret;
11763             if (!is_error(ret)) {
11764                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11765                 if (!target_grouplist) {
11766                     return -TARGET_EFAULT;
11767                 }
11768                 for(i = 0;i < ret; i++)
11769                     target_grouplist[i] = tswap32(grouplist[i]);
11770                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11771             }
11772         }
11773         return ret;
11774 #endif
11775 #ifdef TARGET_NR_setgroups32
11776     case TARGET_NR_setgroups32:
11777         {
11778             int gidsetsize = arg1;
11779             uint32_t *target_grouplist;
11780             gid_t *grouplist;
11781             int i;
11782 
11783             grouplist = alloca(gidsetsize * sizeof(gid_t));
11784             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11785             if (!target_grouplist) {
11786                 return -TARGET_EFAULT;
11787             }
11788             for(i = 0;i < gidsetsize; i++)
11789                 grouplist[i] = tswap32(target_grouplist[i]);
11790             unlock_user(target_grouplist, arg2, 0);
11791             return get_errno(setgroups(gidsetsize, grouplist));
11792         }
11793 #endif
11794 #ifdef TARGET_NR_fchown32
11795     case TARGET_NR_fchown32:
11796         return get_errno(fchown(arg1, arg2, arg3));
11797 #endif
11798 #ifdef TARGET_NR_setresuid32
11799     case TARGET_NR_setresuid32:
11800         return get_errno(sys_setresuid(arg1, arg2, arg3));
11801 #endif
11802 #ifdef TARGET_NR_getresuid32
11803     case TARGET_NR_getresuid32:
11804         {
11805             uid_t ruid, euid, suid;
11806             ret = get_errno(getresuid(&ruid, &euid, &suid));
11807             if (!is_error(ret)) {
11808                 if (put_user_u32(ruid, arg1)
11809                     || put_user_u32(euid, arg2)
11810                     || put_user_u32(suid, arg3))
11811                     return -TARGET_EFAULT;
11812             }
11813         }
11814         return ret;
11815 #endif
11816 #ifdef TARGET_NR_setresgid32
11817     case TARGET_NR_setresgid32:
11818         return get_errno(sys_setresgid(arg1, arg2, arg3));
11819 #endif
11820 #ifdef TARGET_NR_getresgid32
11821     case TARGET_NR_getresgid32:
11822         {
11823             gid_t rgid, egid, sgid;
11824             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11825             if (!is_error(ret)) {
11826                 if (put_user_u32(rgid, arg1)
11827                     || put_user_u32(egid, arg2)
11828                     || put_user_u32(sgid, arg3))
11829                     return -TARGET_EFAULT;
11830             }
11831         }
11832         return ret;
11833 #endif
11834 #ifdef TARGET_NR_chown32
11835     case TARGET_NR_chown32:
11836         if (!(p = lock_user_string(arg1)))
11837             return -TARGET_EFAULT;
11838         ret = get_errno(chown(p, arg2, arg3));
11839         unlock_user(p, arg1, 0);
11840         return ret;
11841 #endif
11842 #ifdef TARGET_NR_setuid32
11843     case TARGET_NR_setuid32:
11844         return get_errno(sys_setuid(arg1));
11845 #endif
11846 #ifdef TARGET_NR_setgid32
11847     case TARGET_NR_setgid32:
11848         return get_errno(sys_setgid(arg1));
11849 #endif
11850 #ifdef TARGET_NR_setfsuid32
11851     case TARGET_NR_setfsuid32:
11852         return get_errno(setfsuid(arg1));
11853 #endif
11854 #ifdef TARGET_NR_setfsgid32
11855     case TARGET_NR_setfsgid32:
11856         return get_errno(setfsgid(arg1));
11857 #endif
11858 #ifdef TARGET_NR_mincore
11859     case TARGET_NR_mincore:
11860         {
11861             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11862             if (!a) {
11863                 return -TARGET_ENOMEM;
11864             }
11865             p = lock_user_string(arg3);
11866             if (!p) {
11867                 ret = -TARGET_EFAULT;
11868             } else {
11869                 ret = get_errno(mincore(a, arg2, p));
11870                 unlock_user(p, arg3, ret);
11871             }
11872             unlock_user(a, arg1, 0);
11873         }
11874         return ret;
11875 #endif
11876 #ifdef TARGET_NR_arm_fadvise64_64
11877     case TARGET_NR_arm_fadvise64_64:
11878         /* arm_fadvise64_64 looks like fadvise64_64 but
11879          * with different argument order: fd, advice, offset, len
11880          * rather than the usual fd, offset, len, advice.
11881          * Note that offset and len are both 64-bit so appear as
11882          * pairs of 32-bit registers.
11883          */
11884         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11885                             target_offset64(arg5, arg6), arg2);
11886         return -host_to_target_errno(ret);
11887 #endif
11888 
11889 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11890 
11891 #ifdef TARGET_NR_fadvise64_64
11892     case TARGET_NR_fadvise64_64:
11893 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11894         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11895         ret = arg2;
11896         arg2 = arg3;
11897         arg3 = arg4;
11898         arg4 = arg5;
11899         arg5 = arg6;
11900         arg6 = ret;
11901 #else
11902         /* 6 args: fd, offset (high, low), len (high, low), advice */
11903         if (regpairs_aligned(cpu_env, num)) {
11904             /* offset is in (3,4), len in (5,6) and advice in 7 */
11905             arg2 = arg3;
11906             arg3 = arg4;
11907             arg4 = arg5;
11908             arg5 = arg6;
11909             arg6 = arg7;
11910         }
11911 #endif
11912         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11913                             target_offset64(arg4, arg5), arg6);
11914         return -host_to_target_errno(ret);
11915 #endif
11916 
11917 #ifdef TARGET_NR_fadvise64
11918     case TARGET_NR_fadvise64:
11919         /* 5 args: fd, offset (high, low), len, advice */
11920         if (regpairs_aligned(cpu_env, num)) {
11921             /* offset is in (3,4), len in 5 and advice in 6 */
11922             arg2 = arg3;
11923             arg3 = arg4;
11924             arg4 = arg5;
11925             arg5 = arg6;
11926         }
11927         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11928         return -host_to_target_errno(ret);
11929 #endif
11930 
11931 #else /* not a 32-bit ABI */
11932 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11933 #ifdef TARGET_NR_fadvise64_64
11934     case TARGET_NR_fadvise64_64:
11935 #endif
11936 #ifdef TARGET_NR_fadvise64
11937     case TARGET_NR_fadvise64:
11938 #endif
11939 #ifdef TARGET_S390X
11940         switch (arg4) {
11941         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11942         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11943         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11944         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11945         default: break;
11946         }
11947 #endif
11948         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11949 #endif
11950 #endif /* end of 64-bit ABI fadvise handling */
11951 
11952 #ifdef TARGET_NR_madvise
11953     case TARGET_NR_madvise:
11954         return target_madvise(arg1, arg2, arg3);
11955 #endif
11956 #ifdef TARGET_NR_fcntl64
11957     case TARGET_NR_fcntl64:
11958     {
11959         int cmd;
11960         struct flock64 fl;
11961         from_flock64_fn *copyfrom = copy_from_user_flock64;
11962         to_flock64_fn *copyto = copy_to_user_flock64;
11963 
11964 #ifdef TARGET_ARM
11965         if (!cpu_env->eabi) {
11966             copyfrom = copy_from_user_oabi_flock64;
11967             copyto = copy_to_user_oabi_flock64;
11968         }
11969 #endif
11970 
11971         cmd = target_to_host_fcntl_cmd(arg2);
11972         if (cmd == -TARGET_EINVAL) {
11973             return cmd;
11974         }
11975 
11976         switch(arg2) {
11977         case TARGET_F_GETLK64:
11978             ret = copyfrom(&fl, arg3);
11979             if (ret) {
11980                 break;
11981             }
11982             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11983             if (ret == 0) {
11984                 ret = copyto(arg3, &fl);
11985             }
11986 	    break;
11987 
11988         case TARGET_F_SETLK64:
11989         case TARGET_F_SETLKW64:
11990             ret = copyfrom(&fl, arg3);
11991             if (ret) {
11992                 break;
11993             }
11994             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11995 	    break;
11996         default:
11997             ret = do_fcntl(arg1, arg2, arg3);
11998             break;
11999         }
12000         return ret;
12001     }
12002 #endif
12003 #ifdef TARGET_NR_cacheflush
12004     case TARGET_NR_cacheflush:
12005         /* self-modifying code is handled automatically, so nothing needed */
12006         return 0;
12007 #endif
12008 #ifdef TARGET_NR_getpagesize
12009     case TARGET_NR_getpagesize:
12010         return TARGET_PAGE_SIZE;
12011 #endif
12012     case TARGET_NR_gettid:
12013         return get_errno(sys_gettid());
12014 #ifdef TARGET_NR_readahead
12015     case TARGET_NR_readahead:
12016 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12017         if (regpairs_aligned(cpu_env, num)) {
12018             arg2 = arg3;
12019             arg3 = arg4;
12020             arg4 = arg5;
12021         }
12022         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12023 #else
12024         ret = get_errno(readahead(arg1, arg2, arg3));
12025 #endif
12026         return ret;
12027 #endif
12028 #ifdef CONFIG_ATTR
12029 #ifdef TARGET_NR_setxattr
12030     case TARGET_NR_listxattr:
12031     case TARGET_NR_llistxattr:
12032     {
12033         void *p, *b = 0;
12034         if (arg2) {
12035             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12036             if (!b) {
12037                 return -TARGET_EFAULT;
12038             }
12039         }
12040         p = lock_user_string(arg1);
12041         if (p) {
12042             if (num == TARGET_NR_listxattr) {
12043                 ret = get_errno(listxattr(p, b, arg3));
12044             } else {
12045                 ret = get_errno(llistxattr(p, b, arg3));
12046             }
12047         } else {
12048             ret = -TARGET_EFAULT;
12049         }
12050         unlock_user(p, arg1, 0);
12051         unlock_user(b, arg2, arg3);
12052         return ret;
12053     }
12054     case TARGET_NR_flistxattr:
12055     {
12056         void *b = 0;
12057         if (arg2) {
12058             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12059             if (!b) {
12060                 return -TARGET_EFAULT;
12061             }
12062         }
12063         ret = get_errno(flistxattr(arg1, b, arg3));
12064         unlock_user(b, arg2, arg3);
12065         return ret;
12066     }
12067     case TARGET_NR_setxattr:
12068     case TARGET_NR_lsetxattr:
12069         {
12070             void *p, *n, *v = 0;
12071             if (arg3) {
12072                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12073                 if (!v) {
12074                     return -TARGET_EFAULT;
12075                 }
12076             }
12077             p = lock_user_string(arg1);
12078             n = lock_user_string(arg2);
12079             if (p && n) {
12080                 if (num == TARGET_NR_setxattr) {
12081                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12082                 } else {
12083                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12084                 }
12085             } else {
12086                 ret = -TARGET_EFAULT;
12087             }
12088             unlock_user(p, arg1, 0);
12089             unlock_user(n, arg2, 0);
12090             unlock_user(v, arg3, 0);
12091         }
12092         return ret;
12093     case TARGET_NR_fsetxattr:
12094         {
12095             void *n, *v = 0;
12096             if (arg3) {
12097                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12098                 if (!v) {
12099                     return -TARGET_EFAULT;
12100                 }
12101             }
12102             n = lock_user_string(arg2);
12103             if (n) {
12104                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12105             } else {
12106                 ret = -TARGET_EFAULT;
12107             }
12108             unlock_user(n, arg2, 0);
12109             unlock_user(v, arg3, 0);
12110         }
12111         return ret;
12112     case TARGET_NR_getxattr:
12113     case TARGET_NR_lgetxattr:
12114         {
12115             void *p, *n, *v = 0;
12116             if (arg3) {
12117                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12118                 if (!v) {
12119                     return -TARGET_EFAULT;
12120                 }
12121             }
12122             p = lock_user_string(arg1);
12123             n = lock_user_string(arg2);
12124             if (p && n) {
12125                 if (num == TARGET_NR_getxattr) {
12126                     ret = get_errno(getxattr(p, n, v, arg4));
12127                 } else {
12128                     ret = get_errno(lgetxattr(p, n, v, arg4));
12129                 }
12130             } else {
12131                 ret = -TARGET_EFAULT;
12132             }
12133             unlock_user(p, arg1, 0);
12134             unlock_user(n, arg2, 0);
12135             unlock_user(v, arg3, arg4);
12136         }
12137         return ret;
12138     case TARGET_NR_fgetxattr:
12139         {
12140             void *n, *v = 0;
12141             if (arg3) {
12142                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12143                 if (!v) {
12144                     return -TARGET_EFAULT;
12145                 }
12146             }
12147             n = lock_user_string(arg2);
12148             if (n) {
12149                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12150             } else {
12151                 ret = -TARGET_EFAULT;
12152             }
12153             unlock_user(n, arg2, 0);
12154             unlock_user(v, arg3, arg4);
12155         }
12156         return ret;
12157     case TARGET_NR_removexattr:
12158     case TARGET_NR_lremovexattr:
12159         {
12160             void *p, *n;
12161             p = lock_user_string(arg1);
12162             n = lock_user_string(arg2);
12163             if (p && n) {
12164                 if (num == TARGET_NR_removexattr) {
12165                     ret = get_errno(removexattr(p, n));
12166                 } else {
12167                     ret = get_errno(lremovexattr(p, n));
12168                 }
12169             } else {
12170                 ret = -TARGET_EFAULT;
12171             }
12172             unlock_user(p, arg1, 0);
12173             unlock_user(n, arg2, 0);
12174         }
12175         return ret;
12176     case TARGET_NR_fremovexattr:
12177         {
12178             void *n;
12179             n = lock_user_string(arg2);
12180             if (n) {
12181                 ret = get_errno(fremovexattr(arg1, n));
12182             } else {
12183                 ret = -TARGET_EFAULT;
12184             }
12185             unlock_user(n, arg2, 0);
12186         }
12187         return ret;
12188 #endif
12189 #endif /* CONFIG_ATTR */
12190 #ifdef TARGET_NR_set_thread_area
12191     case TARGET_NR_set_thread_area:
12192 #if defined(TARGET_MIPS)
12193       cpu_env->active_tc.CP0_UserLocal = arg1;
12194       return 0;
12195 #elif defined(TARGET_CRIS)
12196       if (arg1 & 0xff)
12197           ret = -TARGET_EINVAL;
12198       else {
12199           cpu_env->pregs[PR_PID] = arg1;
12200           ret = 0;
12201       }
12202       return ret;
12203 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12204       return do_set_thread_area(cpu_env, arg1);
12205 #elif defined(TARGET_M68K)
12206       {
12207           TaskState *ts = cpu->opaque;
12208           ts->tp_value = arg1;
12209           return 0;
12210       }
12211 #else
12212       return -TARGET_ENOSYS;
12213 #endif
12214 #endif
12215 #ifdef TARGET_NR_get_thread_area
12216     case TARGET_NR_get_thread_area:
12217 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12218         return do_get_thread_area(cpu_env, arg1);
12219 #elif defined(TARGET_M68K)
12220         {
12221             TaskState *ts = cpu->opaque;
12222             return ts->tp_value;
12223         }
12224 #else
12225         return -TARGET_ENOSYS;
12226 #endif
12227 #endif
12228 #ifdef TARGET_NR_getdomainname
12229     case TARGET_NR_getdomainname:
12230         return -TARGET_ENOSYS;
12231 #endif
12232 
12233 #ifdef TARGET_NR_clock_settime
12234     case TARGET_NR_clock_settime:
12235     {
12236         struct timespec ts;
12237 
12238         ret = target_to_host_timespec(&ts, arg2);
12239         if (!is_error(ret)) {
12240             ret = get_errno(clock_settime(arg1, &ts));
12241         }
12242         return ret;
12243     }
12244 #endif
12245 #ifdef TARGET_NR_clock_settime64
12246     case TARGET_NR_clock_settime64:
12247     {
12248         struct timespec ts;
12249 
12250         ret = target_to_host_timespec64(&ts, arg2);
12251         if (!is_error(ret)) {
12252             ret = get_errno(clock_settime(arg1, &ts));
12253         }
12254         return ret;
12255     }
12256 #endif
12257 #ifdef TARGET_NR_clock_gettime
12258     case TARGET_NR_clock_gettime:
12259     {
12260         struct timespec ts;
12261         ret = get_errno(clock_gettime(arg1, &ts));
12262         if (!is_error(ret)) {
12263             ret = host_to_target_timespec(arg2, &ts);
12264         }
12265         return ret;
12266     }
12267 #endif
12268 #ifdef TARGET_NR_clock_gettime64
12269     case TARGET_NR_clock_gettime64:
12270     {
12271         struct timespec ts;
12272         ret = get_errno(clock_gettime(arg1, &ts));
12273         if (!is_error(ret)) {
12274             ret = host_to_target_timespec64(arg2, &ts);
12275         }
12276         return ret;
12277     }
12278 #endif
12279 #ifdef TARGET_NR_clock_getres
12280     case TARGET_NR_clock_getres:
12281     {
12282         struct timespec ts;
12283         ret = get_errno(clock_getres(arg1, &ts));
12284         if (!is_error(ret)) {
12285             host_to_target_timespec(arg2, &ts);
12286         }
12287         return ret;
12288     }
12289 #endif
12290 #ifdef TARGET_NR_clock_getres_time64
12291     case TARGET_NR_clock_getres_time64:
12292     {
12293         struct timespec ts;
12294         ret = get_errno(clock_getres(arg1, &ts));
12295         if (!is_error(ret)) {
12296             host_to_target_timespec64(arg2, &ts);
12297         }
12298         return ret;
12299     }
12300 #endif
12301 #ifdef TARGET_NR_clock_nanosleep
12302     case TARGET_NR_clock_nanosleep:
12303     {
12304         struct timespec ts;
12305         if (target_to_host_timespec(&ts, arg3)) {
12306             return -TARGET_EFAULT;
12307         }
12308         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12309                                              &ts, arg4 ? &ts : NULL));
12310         /*
12311          * if the call is interrupted by a signal handler, it fails
12312          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12313          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12314          */
12315         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12316             host_to_target_timespec(arg4, &ts)) {
12317               return -TARGET_EFAULT;
12318         }
12319 
12320         return ret;
12321     }
12322 #endif
12323 #ifdef TARGET_NR_clock_nanosleep_time64
12324     case TARGET_NR_clock_nanosleep_time64:
12325     {
12326         struct timespec ts;
12327 
12328         if (target_to_host_timespec64(&ts, arg3)) {
12329             return -TARGET_EFAULT;
12330         }
12331 
12332         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12333                                              &ts, arg4 ? &ts : NULL));
12334 
12335         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12336             host_to_target_timespec64(arg4, &ts)) {
12337             return -TARGET_EFAULT;
12338         }
12339         return ret;
12340     }
12341 #endif
12342 
12343 #if defined(TARGET_NR_set_tid_address)
12344     case TARGET_NR_set_tid_address:
12345     {
12346         TaskState *ts = cpu->opaque;
12347         ts->child_tidptr = arg1;
12348         /* do not call host set_tid_address() syscall, instead return tid() */
12349         return get_errno(sys_gettid());
12350     }
12351 #endif
12352 
12353     case TARGET_NR_tkill:
12354         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12355 
12356     case TARGET_NR_tgkill:
12357         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12358                          target_to_host_signal(arg3)));
12359 
12360 #ifdef TARGET_NR_set_robust_list
12361     case TARGET_NR_set_robust_list:
12362     case TARGET_NR_get_robust_list:
12363         /* The ABI for supporting robust futexes has userspace pass
12364          * the kernel a pointer to a linked list which is updated by
12365          * userspace after the syscall; the list is walked by the kernel
12366          * when the thread exits. Since the linked list in QEMU guest
12367          * memory isn't a valid linked list for the host and we have
12368          * no way to reliably intercept the thread-death event, we can't
12369          * support these. Silently return ENOSYS so that guest userspace
12370          * falls back to a non-robust futex implementation (which should
12371          * be OK except in the corner case of the guest crashing while
12372          * holding a mutex that is shared with another process via
12373          * shared memory).
12374          */
12375         return -TARGET_ENOSYS;
12376 #endif
12377 
12378 #if defined(TARGET_NR_utimensat)
12379     case TARGET_NR_utimensat:
12380         {
12381             struct timespec *tsp, ts[2];
12382             if (!arg3) {
12383                 tsp = NULL;
12384             } else {
12385                 if (target_to_host_timespec(ts, arg3)) {
12386                     return -TARGET_EFAULT;
12387                 }
12388                 if (target_to_host_timespec(ts + 1, arg3 +
12389                                             sizeof(struct target_timespec))) {
12390                     return -TARGET_EFAULT;
12391                 }
12392                 tsp = ts;
12393             }
12394             if (!arg2)
12395                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12396             else {
12397                 if (!(p = lock_user_string(arg2))) {
12398                     return -TARGET_EFAULT;
12399                 }
12400                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12401                 unlock_user(p, arg2, 0);
12402             }
12403         }
12404         return ret;
12405 #endif
12406 #ifdef TARGET_NR_utimensat_time64
12407     case TARGET_NR_utimensat_time64:
12408         {
12409             struct timespec *tsp, ts[2];
12410             if (!arg3) {
12411                 tsp = NULL;
12412             } else {
12413                 if (target_to_host_timespec64(ts, arg3)) {
12414                     return -TARGET_EFAULT;
12415                 }
12416                 if (target_to_host_timespec64(ts + 1, arg3 +
12417                                      sizeof(struct target__kernel_timespec))) {
12418                     return -TARGET_EFAULT;
12419                 }
12420                 tsp = ts;
12421             }
12422             if (!arg2)
12423                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12424             else {
12425                 p = lock_user_string(arg2);
12426                 if (!p) {
12427                     return -TARGET_EFAULT;
12428                 }
12429                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12430                 unlock_user(p, arg2, 0);
12431             }
12432         }
12433         return ret;
12434 #endif
12435 #ifdef TARGET_NR_futex
12436     case TARGET_NR_futex:
12437         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12438 #endif
12439 #ifdef TARGET_NR_futex_time64
12440     case TARGET_NR_futex_time64:
12441         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12442 #endif
12443 #ifdef CONFIG_INOTIFY
12444 #if defined(TARGET_NR_inotify_init)
12445     case TARGET_NR_inotify_init:
12446         ret = get_errno(inotify_init());
12447         if (ret >= 0) {
12448             fd_trans_register(ret, &target_inotify_trans);
12449         }
12450         return ret;
12451 #endif
12452 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12453     case TARGET_NR_inotify_init1:
12454         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12455                                           fcntl_flags_tbl)));
12456         if (ret >= 0) {
12457             fd_trans_register(ret, &target_inotify_trans);
12458         }
12459         return ret;
12460 #endif
12461 #if defined(TARGET_NR_inotify_add_watch)
12462     case TARGET_NR_inotify_add_watch:
12463         p = lock_user_string(arg2);
12464         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12465         unlock_user(p, arg2, 0);
12466         return ret;
12467 #endif
12468 #if defined(TARGET_NR_inotify_rm_watch)
12469     case TARGET_NR_inotify_rm_watch:
12470         return get_errno(inotify_rm_watch(arg1, arg2));
12471 #endif
12472 #endif
12473 
12474 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12475     case TARGET_NR_mq_open:
12476         {
12477             struct mq_attr posix_mq_attr;
12478             struct mq_attr *pposix_mq_attr;
12479             int host_flags;
12480 
12481             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12482             pposix_mq_attr = NULL;
12483             if (arg4) {
12484                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12485                     return -TARGET_EFAULT;
12486                 }
12487                 pposix_mq_attr = &posix_mq_attr;
12488             }
12489             p = lock_user_string(arg1 - 1);
12490             if (!p) {
12491                 return -TARGET_EFAULT;
12492             }
12493             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12494             unlock_user (p, arg1, 0);
12495         }
12496         return ret;
12497 
12498     case TARGET_NR_mq_unlink:
12499         p = lock_user_string(arg1 - 1);
12500         if (!p) {
12501             return -TARGET_EFAULT;
12502         }
12503         ret = get_errno(mq_unlink(p));
12504         unlock_user (p, arg1, 0);
12505         return ret;
12506 
12507 #ifdef TARGET_NR_mq_timedsend
12508     case TARGET_NR_mq_timedsend:
12509         {
12510             struct timespec ts;
12511 
12512             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12513             if (arg5 != 0) {
12514                 if (target_to_host_timespec(&ts, arg5)) {
12515                     return -TARGET_EFAULT;
12516                 }
12517                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12518                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12519                     return -TARGET_EFAULT;
12520                 }
12521             } else {
12522                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12523             }
12524             unlock_user (p, arg2, arg3);
12525         }
12526         return ret;
12527 #endif
12528 #ifdef TARGET_NR_mq_timedsend_time64
12529     case TARGET_NR_mq_timedsend_time64:
12530         {
12531             struct timespec ts;
12532 
12533             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12534             if (arg5 != 0) {
12535                 if (target_to_host_timespec64(&ts, arg5)) {
12536                     return -TARGET_EFAULT;
12537                 }
12538                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12539                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12540                     return -TARGET_EFAULT;
12541                 }
12542             } else {
12543                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12544             }
12545             unlock_user(p, arg2, arg3);
12546         }
12547         return ret;
12548 #endif
12549 
12550 #ifdef TARGET_NR_mq_timedreceive
12551     case TARGET_NR_mq_timedreceive:
12552         {
12553             struct timespec ts;
12554             unsigned int prio;
12555 
12556             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12557             if (arg5 != 0) {
12558                 if (target_to_host_timespec(&ts, arg5)) {
12559                     return -TARGET_EFAULT;
12560                 }
12561                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12562                                                      &prio, &ts));
12563                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12564                     return -TARGET_EFAULT;
12565                 }
12566             } else {
12567                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12568                                                      &prio, NULL));
12569             }
12570             unlock_user (p, arg2, arg3);
12571             if (arg4 != 0)
12572                 put_user_u32(prio, arg4);
12573         }
12574         return ret;
12575 #endif
12576 #ifdef TARGET_NR_mq_timedreceive_time64
12577     case TARGET_NR_mq_timedreceive_time64:
12578         {
12579             struct timespec ts;
12580             unsigned int prio;
12581 
12582             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12583             if (arg5 != 0) {
12584                 if (target_to_host_timespec64(&ts, arg5)) {
12585                     return -TARGET_EFAULT;
12586                 }
12587                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12588                                                      &prio, &ts));
12589                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12590                     return -TARGET_EFAULT;
12591                 }
12592             } else {
12593                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12594                                                      &prio, NULL));
12595             }
12596             unlock_user(p, arg2, arg3);
12597             if (arg4 != 0) {
12598                 put_user_u32(prio, arg4);
12599             }
12600         }
12601         return ret;
12602 #endif
12603 
12604     /* Not implemented for now... */
12605 /*     case TARGET_NR_mq_notify: */
12606 /*         break; */
12607 
12608     case TARGET_NR_mq_getsetattr:
12609         {
12610             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12611             ret = 0;
12612             if (arg2 != 0) {
12613                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12614                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12615                                            &posix_mq_attr_out));
12616             } else if (arg3 != 0) {
12617                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12618             }
12619             if (ret == 0 && arg3 != 0) {
12620                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12621             }
12622         }
12623         return ret;
12624 #endif
12625 
12626 #ifdef CONFIG_SPLICE
12627 #ifdef TARGET_NR_tee
12628     case TARGET_NR_tee:
12629         {
12630             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12631         }
12632         return ret;
12633 #endif
12634 #ifdef TARGET_NR_splice
12635     case TARGET_NR_splice:
12636         {
12637             loff_t loff_in, loff_out;
12638             loff_t *ploff_in = NULL, *ploff_out = NULL;
12639             if (arg2) {
12640                 if (get_user_u64(loff_in, arg2)) {
12641                     return -TARGET_EFAULT;
12642                 }
12643                 ploff_in = &loff_in;
12644             }
12645             if (arg4) {
12646                 if (get_user_u64(loff_out, arg4)) {
12647                     return -TARGET_EFAULT;
12648                 }
12649                 ploff_out = &loff_out;
12650             }
12651             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12652             if (arg2) {
12653                 if (put_user_u64(loff_in, arg2)) {
12654                     return -TARGET_EFAULT;
12655                 }
12656             }
12657             if (arg4) {
12658                 if (put_user_u64(loff_out, arg4)) {
12659                     return -TARGET_EFAULT;
12660                 }
12661             }
12662         }
12663         return ret;
12664 #endif
12665 #ifdef TARGET_NR_vmsplice
12666 	case TARGET_NR_vmsplice:
12667         {
12668             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12669             if (vec != NULL) {
12670                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12671                 unlock_iovec(vec, arg2, arg3, 0);
12672             } else {
12673                 ret = -host_to_target_errno(errno);
12674             }
12675         }
12676         return ret;
12677 #endif
12678 #endif /* CONFIG_SPLICE */
12679 #ifdef CONFIG_EVENTFD
12680 #if defined(TARGET_NR_eventfd)
12681     case TARGET_NR_eventfd:
12682         ret = get_errno(eventfd(arg1, 0));
12683         if (ret >= 0) {
12684             fd_trans_register(ret, &target_eventfd_trans);
12685         }
12686         return ret;
12687 #endif
12688 #if defined(TARGET_NR_eventfd2)
12689     case TARGET_NR_eventfd2:
12690     {
12691         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12692         if (arg2 & TARGET_O_NONBLOCK) {
12693             host_flags |= O_NONBLOCK;
12694         }
12695         if (arg2 & TARGET_O_CLOEXEC) {
12696             host_flags |= O_CLOEXEC;
12697         }
12698         ret = get_errno(eventfd(arg1, host_flags));
12699         if (ret >= 0) {
12700             fd_trans_register(ret, &target_eventfd_trans);
12701         }
12702         return ret;
12703     }
12704 #endif
12705 #endif /* CONFIG_EVENTFD  */
12706 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12707     case TARGET_NR_fallocate:
12708 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12709         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12710                                   target_offset64(arg5, arg6)));
12711 #else
12712         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12713 #endif
12714         return ret;
12715 #endif
12716 #if defined(CONFIG_SYNC_FILE_RANGE)
12717 #if defined(TARGET_NR_sync_file_range)
12718     case TARGET_NR_sync_file_range:
12719 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12720 #if defined(TARGET_MIPS)
12721         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12722                                         target_offset64(arg5, arg6), arg7));
12723 #else
12724         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12725                                         target_offset64(arg4, arg5), arg6));
12726 #endif /* !TARGET_MIPS */
12727 #else
12728         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12729 #endif
12730         return ret;
12731 #endif
12732 #if defined(TARGET_NR_sync_file_range2) || \
12733     defined(TARGET_NR_arm_sync_file_range)
12734 #if defined(TARGET_NR_sync_file_range2)
12735     case TARGET_NR_sync_file_range2:
12736 #endif
12737 #if defined(TARGET_NR_arm_sync_file_range)
12738     case TARGET_NR_arm_sync_file_range:
12739 #endif
12740         /* This is like sync_file_range but the arguments are reordered */
12741 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12742         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12743                                         target_offset64(arg5, arg6), arg2));
12744 #else
12745         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12746 #endif
12747         return ret;
12748 #endif
12749 #endif
12750 #if defined(TARGET_NR_signalfd4)
12751     case TARGET_NR_signalfd4:
12752         return do_signalfd4(arg1, arg2, arg4);
12753 #endif
12754 #if defined(TARGET_NR_signalfd)
12755     case TARGET_NR_signalfd:
12756         return do_signalfd4(arg1, arg2, 0);
12757 #endif
12758 #if defined(CONFIG_EPOLL)
12759 #if defined(TARGET_NR_epoll_create)
12760     case TARGET_NR_epoll_create:
12761         return get_errno(epoll_create(arg1));
12762 #endif
12763 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12764     case TARGET_NR_epoll_create1:
12765         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12766 #endif
12767 #if defined(TARGET_NR_epoll_ctl)
12768     case TARGET_NR_epoll_ctl:
12769     {
12770         struct epoll_event ep;
12771         struct epoll_event *epp = 0;
12772         if (arg4) {
12773             if (arg2 != EPOLL_CTL_DEL) {
12774                 struct target_epoll_event *target_ep;
12775                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12776                     return -TARGET_EFAULT;
12777                 }
12778                 ep.events = tswap32(target_ep->events);
12779                 /*
12780                  * The epoll_data_t union is just opaque data to the kernel,
12781                  * so we transfer all 64 bits across and need not worry what
12782                  * actual data type it is.
12783                  */
12784                 ep.data.u64 = tswap64(target_ep->data.u64);
12785                 unlock_user_struct(target_ep, arg4, 0);
12786             }
12787             /*
12788              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12789              * non-null pointer, even though this argument is ignored.
12790              *
12791              */
12792             epp = &ep;
12793         }
12794         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12795     }
12796 #endif
12797 
12798 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12799 #if defined(TARGET_NR_epoll_wait)
12800     case TARGET_NR_epoll_wait:
12801 #endif
12802 #if defined(TARGET_NR_epoll_pwait)
12803     case TARGET_NR_epoll_pwait:
12804 #endif
12805     {
12806         struct target_epoll_event *target_ep;
12807         struct epoll_event *ep;
12808         int epfd = arg1;
12809         int maxevents = arg3;
12810         int timeout = arg4;
12811 
12812         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12813             return -TARGET_EINVAL;
12814         }
12815 
12816         target_ep = lock_user(VERIFY_WRITE, arg2,
12817                               maxevents * sizeof(struct target_epoll_event), 1);
12818         if (!target_ep) {
12819             return -TARGET_EFAULT;
12820         }
12821 
12822         ep = g_try_new(struct epoll_event, maxevents);
12823         if (!ep) {
12824             unlock_user(target_ep, arg2, 0);
12825             return -TARGET_ENOMEM;
12826         }
12827 
12828         switch (num) {
12829 #if defined(TARGET_NR_epoll_pwait)
12830         case TARGET_NR_epoll_pwait:
12831         {
12832             sigset_t *set = NULL;
12833 
12834             if (arg5) {
12835                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12836                 if (ret != 0) {
12837                     break;
12838                 }
12839             }
12840 
12841             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12842                                              set, SIGSET_T_SIZE));
12843 
12844             if (set) {
12845                 finish_sigsuspend_mask(ret);
12846             }
12847             break;
12848         }
12849 #endif
12850 #if defined(TARGET_NR_epoll_wait)
12851         case TARGET_NR_epoll_wait:
12852             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12853                                              NULL, 0));
12854             break;
12855 #endif
12856         default:
12857             ret = -TARGET_ENOSYS;
12858         }
12859         if (!is_error(ret)) {
12860             int i;
12861             for (i = 0; i < ret; i++) {
12862                 target_ep[i].events = tswap32(ep[i].events);
12863                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12864             }
12865             unlock_user(target_ep, arg2,
12866                         ret * sizeof(struct target_epoll_event));
12867         } else {
12868             unlock_user(target_ep, arg2, 0);
12869         }
12870         g_free(ep);
12871         return ret;
12872     }
12873 #endif
12874 #endif
12875 #ifdef TARGET_NR_prlimit64
12876     case TARGET_NR_prlimit64:
12877     {
12878         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12879         struct target_rlimit64 *target_rnew, *target_rold;
12880         struct host_rlimit64 rnew, rold, *rnewp = 0;
12881         int resource = target_to_host_resource(arg2);
12882 
12883         if (arg3 && (resource != RLIMIT_AS &&
12884                      resource != RLIMIT_DATA &&
12885                      resource != RLIMIT_STACK)) {
12886             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12887                 return -TARGET_EFAULT;
12888             }
12889             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12890             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12891             unlock_user_struct(target_rnew, arg3, 0);
12892             rnewp = &rnew;
12893         }
12894 
12895         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12896         if (!is_error(ret) && arg4) {
12897             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12898                 return -TARGET_EFAULT;
12899             }
12900             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12901             target_rold->rlim_max = tswap64(rold.rlim_max);
12902             unlock_user_struct(target_rold, arg4, 1);
12903         }
12904         return ret;
12905     }
12906 #endif
12907 #ifdef TARGET_NR_gethostname
12908     case TARGET_NR_gethostname:
12909     {
12910         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12911         if (name) {
12912             ret = get_errno(gethostname(name, arg2));
12913             unlock_user(name, arg1, arg2);
12914         } else {
12915             ret = -TARGET_EFAULT;
12916         }
12917         return ret;
12918     }
12919 #endif
12920 #ifdef TARGET_NR_atomic_cmpxchg_32
12921     case TARGET_NR_atomic_cmpxchg_32:
12922     {
12923         /* should use start_exclusive from main.c */
12924         abi_ulong mem_value;
12925         if (get_user_u32(mem_value, arg6)) {
12926             target_siginfo_t info;
12927             info.si_signo = SIGSEGV;
12928             info.si_errno = 0;
12929             info.si_code = TARGET_SEGV_MAPERR;
12930             info._sifields._sigfault._addr = arg6;
12931             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12932             ret = 0xdeadbeef;
12933 
12934         }
12935         if (mem_value == arg2)
12936             put_user_u32(arg1, arg6);
12937         return mem_value;
12938     }
12939 #endif
12940 #ifdef TARGET_NR_atomic_barrier
12941     case TARGET_NR_atomic_barrier:
12942         /* Like the kernel implementation and the
12943            qemu arm barrier, no-op this? */
12944         return 0;
12945 #endif
12946 
12947 #ifdef TARGET_NR_timer_create
12948     case TARGET_NR_timer_create:
12949     {
12950         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12951 
12952         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12953 
12954         int clkid = arg1;
12955         int timer_index = next_free_host_timer();
12956 
12957         if (timer_index < 0) {
12958             ret = -TARGET_EAGAIN;
12959         } else {
12960             timer_t *phtimer = g_posix_timers  + timer_index;
12961 
12962             if (arg2) {
12963                 phost_sevp = &host_sevp;
12964                 ret = target_to_host_sigevent(phost_sevp, arg2);
12965                 if (ret != 0) {
12966                     free_host_timer_slot(timer_index);
12967                     return ret;
12968                 }
12969             }
12970 
12971             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12972             if (ret) {
12973                 free_host_timer_slot(timer_index);
12974             } else {
12975                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12976                     timer_delete(*phtimer);
12977                     free_host_timer_slot(timer_index);
12978                     return -TARGET_EFAULT;
12979                 }
12980             }
12981         }
12982         return ret;
12983     }
12984 #endif
12985 
12986 #ifdef TARGET_NR_timer_settime
12987     case TARGET_NR_timer_settime:
12988     {
12989         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12990          * struct itimerspec * old_value */
12991         target_timer_t timerid = get_timer_id(arg1);
12992 
12993         if (timerid < 0) {
12994             ret = timerid;
12995         } else if (arg3 == 0) {
12996             ret = -TARGET_EINVAL;
12997         } else {
12998             timer_t htimer = g_posix_timers[timerid];
12999             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13000 
13001             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13002                 return -TARGET_EFAULT;
13003             }
13004             ret = get_errno(
13005                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13006             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13007                 return -TARGET_EFAULT;
13008             }
13009         }
13010         return ret;
13011     }
13012 #endif
13013 
13014 #ifdef TARGET_NR_timer_settime64
13015     case TARGET_NR_timer_settime64:
13016     {
13017         target_timer_t timerid = get_timer_id(arg1);
13018 
13019         if (timerid < 0) {
13020             ret = timerid;
13021         } else if (arg3 == 0) {
13022             ret = -TARGET_EINVAL;
13023         } else {
13024             timer_t htimer = g_posix_timers[timerid];
13025             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13026 
13027             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13028                 return -TARGET_EFAULT;
13029             }
13030             ret = get_errno(
13031                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13032             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13033                 return -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037     }
13038 #endif
13039 
13040 #ifdef TARGET_NR_timer_gettime
13041     case TARGET_NR_timer_gettime:
13042     {
13043         /* args: timer_t timerid, struct itimerspec *curr_value */
13044         target_timer_t timerid = get_timer_id(arg1);
13045 
13046         if (timerid < 0) {
13047             ret = timerid;
13048         } else if (!arg2) {
13049             ret = -TARGET_EFAULT;
13050         } else {
13051             timer_t htimer = g_posix_timers[timerid];
13052             struct itimerspec hspec;
13053             ret = get_errno(timer_gettime(htimer, &hspec));
13054 
13055             if (host_to_target_itimerspec(arg2, &hspec)) {
13056                 ret = -TARGET_EFAULT;
13057             }
13058         }
13059         return ret;
13060     }
13061 #endif
13062 
13063 #ifdef TARGET_NR_timer_gettime64
13064     case TARGET_NR_timer_gettime64:
13065     {
13066         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13067         target_timer_t timerid = get_timer_id(arg1);
13068 
13069         if (timerid < 0) {
13070             ret = timerid;
13071         } else if (!arg2) {
13072             ret = -TARGET_EFAULT;
13073         } else {
13074             timer_t htimer = g_posix_timers[timerid];
13075             struct itimerspec hspec;
13076             ret = get_errno(timer_gettime(htimer, &hspec));
13077 
13078             if (host_to_target_itimerspec64(arg2, &hspec)) {
13079                 ret = -TARGET_EFAULT;
13080             }
13081         }
13082         return ret;
13083     }
13084 #endif
13085 
13086 #ifdef TARGET_NR_timer_getoverrun
13087     case TARGET_NR_timer_getoverrun:
13088     {
13089         /* args: timer_t timerid */
13090         target_timer_t timerid = get_timer_id(arg1);
13091 
13092         if (timerid < 0) {
13093             ret = timerid;
13094         } else {
13095             timer_t htimer = g_posix_timers[timerid];
13096             ret = get_errno(timer_getoverrun(htimer));
13097         }
13098         return ret;
13099     }
13100 #endif
13101 
13102 #ifdef TARGET_NR_timer_delete
13103     case TARGET_NR_timer_delete:
13104     {
13105         /* args: timer_t timerid */
13106         target_timer_t timerid = get_timer_id(arg1);
13107 
13108         if (timerid < 0) {
13109             ret = timerid;
13110         } else {
13111             timer_t htimer = g_posix_timers[timerid];
13112             ret = get_errno(timer_delete(htimer));
13113             free_host_timer_slot(timerid);
13114         }
13115         return ret;
13116     }
13117 #endif
13118 
13119 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13120     case TARGET_NR_timerfd_create:
13121         return get_errno(timerfd_create(arg1,
13122                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13123 #endif
13124 
13125 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13126     case TARGET_NR_timerfd_gettime:
13127         {
13128             struct itimerspec its_curr;
13129 
13130             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13131 
13132             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13133                 return -TARGET_EFAULT;
13134             }
13135         }
13136         return ret;
13137 #endif
13138 
13139 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13140     case TARGET_NR_timerfd_gettime64:
13141         {
13142             struct itimerspec its_curr;
13143 
13144             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13145 
13146             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13147                 return -TARGET_EFAULT;
13148             }
13149         }
13150         return ret;
13151 #endif
13152 
13153 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13154     case TARGET_NR_timerfd_settime:
13155         {
13156             struct itimerspec its_new, its_old, *p_new;
13157 
13158             if (arg3) {
13159                 if (target_to_host_itimerspec(&its_new, arg3)) {
13160                     return -TARGET_EFAULT;
13161                 }
13162                 p_new = &its_new;
13163             } else {
13164                 p_new = NULL;
13165             }
13166 
13167             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13168 
13169             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13170                 return -TARGET_EFAULT;
13171             }
13172         }
13173         return ret;
13174 #endif
13175 
13176 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13177     case TARGET_NR_timerfd_settime64:
13178         {
13179             struct itimerspec its_new, its_old, *p_new;
13180 
13181             if (arg3) {
13182                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13183                     return -TARGET_EFAULT;
13184                 }
13185                 p_new = &its_new;
13186             } else {
13187                 p_new = NULL;
13188             }
13189 
13190             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13191 
13192             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13193                 return -TARGET_EFAULT;
13194             }
13195         }
13196         return ret;
13197 #endif
13198 
13199 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13200     case TARGET_NR_ioprio_get:
13201         return get_errno(ioprio_get(arg1, arg2));
13202 #endif
13203 
13204 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13205     case TARGET_NR_ioprio_set:
13206         return get_errno(ioprio_set(arg1, arg2, arg3));
13207 #endif
13208 
13209 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13210     case TARGET_NR_setns:
13211         return get_errno(setns(arg1, arg2));
13212 #endif
13213 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13214     case TARGET_NR_unshare:
13215         return get_errno(unshare(arg1));
13216 #endif
13217 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13218     case TARGET_NR_kcmp:
13219         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13220 #endif
13221 #ifdef TARGET_NR_swapcontext
13222     case TARGET_NR_swapcontext:
13223         /* PowerPC specific.  */
13224         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13225 #endif
13226 #ifdef TARGET_NR_memfd_create
13227     case TARGET_NR_memfd_create:
13228         p = lock_user_string(arg1);
13229         if (!p) {
13230             return -TARGET_EFAULT;
13231         }
13232         ret = get_errno(memfd_create(p, arg2));
13233         fd_trans_unregister(ret);
13234         unlock_user(p, arg1, 0);
13235         return ret;
13236 #endif
13237 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13238     case TARGET_NR_membarrier:
13239         return get_errno(membarrier(arg1, arg2));
13240 #endif
13241 
13242 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13243     case TARGET_NR_copy_file_range:
13244         {
13245             loff_t inoff, outoff;
13246             loff_t *pinoff = NULL, *poutoff = NULL;
13247 
13248             if (arg2) {
13249                 if (get_user_u64(inoff, arg2)) {
13250                     return -TARGET_EFAULT;
13251                 }
13252                 pinoff = &inoff;
13253             }
13254             if (arg4) {
13255                 if (get_user_u64(outoff, arg4)) {
13256                     return -TARGET_EFAULT;
13257                 }
13258                 poutoff = &outoff;
13259             }
13260             /* Do not sign-extend the count parameter. */
13261             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13262                                                  (abi_ulong)arg5, arg6));
13263             if (!is_error(ret) && ret > 0) {
13264                 if (arg2) {
13265                     if (put_user_u64(inoff, arg2)) {
13266                         return -TARGET_EFAULT;
13267                     }
13268                 }
13269                 if (arg4) {
13270                     if (put_user_u64(outoff, arg4)) {
13271                         return -TARGET_EFAULT;
13272                     }
13273                 }
13274             }
13275         }
13276         return ret;
13277 #endif
13278 
13279 #if defined(TARGET_NR_pivot_root)
13280     case TARGET_NR_pivot_root:
13281         {
13282             void *p2;
13283             p = lock_user_string(arg1); /* new_root */
13284             p2 = lock_user_string(arg2); /* put_old */
13285             if (!p || !p2) {
13286                 ret = -TARGET_EFAULT;
13287             } else {
13288                 ret = get_errno(pivot_root(p, p2));
13289             }
13290             unlock_user(p2, arg2, 0);
13291             unlock_user(p, arg1, 0);
13292         }
13293         return ret;
13294 #endif
13295 
13296     default:
13297         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13298         return -TARGET_ENOSYS;
13299     }
13300     return ret;
13301 }
13302 
13303 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13304                     abi_long arg2, abi_long arg3, abi_long arg4,
13305                     abi_long arg5, abi_long arg6, abi_long arg7,
13306                     abi_long arg8)
13307 {
13308     CPUState *cpu = env_cpu(cpu_env);
13309     abi_long ret;
13310 
13311 #ifdef DEBUG_ERESTARTSYS
13312     /* Debug-only code for exercising the syscall-restart code paths
13313      * in the per-architecture cpu main loops: restart every syscall
13314      * the guest makes once before letting it through.
13315      */
13316     {
13317         static bool flag;
13318         flag = !flag;
13319         if (flag) {
13320             return -QEMU_ERESTARTSYS;
13321         }
13322     }
13323 #endif
13324 
13325     record_syscall_start(cpu, num, arg1,
13326                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13327 
13328     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13329         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13330     }
13331 
13332     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13333                       arg5, arg6, arg7, arg8);
13334 
13335     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13336         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13337                           arg3, arg4, arg5, arg6);
13338     }
13339 
13340     record_syscall_return(cpu, num, ret);
13341     return ret;
13342 }
13343