xref: /openbmc/qemu/linux-user/syscall.c (revision 6003159c)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
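
/* Illustrative sketch only (not built): one way to read the masks above.
 * The real classification is done in do_fork(); the helper name below is
 * hypothetical.
 */
#if 0
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): only the optional thread flags,
         * the ignored flags and the exit signal may be set on top. */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;
    }
    /* Looks like fork(): any stray thread/namespace/ptrace bit is caught
     * by CLONE_INVALID_FORK_FLAGS. */
    return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;
}
#endif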
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
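
/*
 * For example, the _syscall1() invocation for exit_group further down
 * expands to:
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. each _syscallN() macro just emits a static wrapper that passes its
 * arguments straight through to the host syscall() function.
 */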
260 
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we use the host getdents to implement getdents, if available.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
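
/*
 * Each row above is { target_mask, target_bits, host_mask, host_bits }:
 * a row applies when (guest_flags & target_mask) == target_bits, and then
 * contributes host_bits to the host flag word.  For example (assuming the
 * usual bitmask helpers elsewhere in linux-user, e.g.
 * target_to_host_bitmask()), a guest open flag word of
 * TARGET_O_WRONLY | TARGET_O_CREAT translates row by row into the host
 * O_WRONLY | O_CREAT.
 */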
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive at runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not match the one used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
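
/*
 * Intended usage (sketch, for illustration only): a timer_create handler
 * claims a slot with next_free_host_timer() -- the qatomic_xchg() marks
 * the slot busy and returns 0 only if it was previously free -- stores
 * the host timer_t in g_posix_timers[slot], and releases the slot with
 * free_host_timer_slot(slot) if creation fails or when the guest deletes
 * the timer.
 */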
537 #endif
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
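
/*
 * For an entry of the form E(ENOSYS) in errnos.c.inc, the two switches
 * above expand to
 *     case ENOSYS: return TARGET_ENOSYS;
 * and
 *     case TARGET_ENOSYS: return ENOSYS;
 * respectively; errno values not listed there pass through unchanged.
 */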
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
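
/*
 * Typical use (hypothetical sketch): when the guest hands us a struct that
 * is larger than the version QEMU knows about (usize > ksize), accept it
 * only if the extra tail bytes are zero, mirroring the kernel's own
 * copy_struct_from_user() convention:
 *
 *     ret = check_zeroed_user(addr, sizeof(struct sched_attr), usize);
 *     if (ret < 0) {
 *         return ret;                // -TARGET_EFAULT
 *     } else if (!ret) {
 *         return -TARGET_E2BIG;      // unknown non-zero tail bits
 *     }
 */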
598 
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
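
/*
 * The safe_ wrappers route the call through safe_syscall() (see
 * user/safe-syscall.h for the exact contract) so that a guest signal
 * arriving just before or while the host syscall blocks can interrupt it
 * and let the syscall be restarted, instead of the signal being delayed
 * until the syscall completes.  For example,
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * expands to
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 */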
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  use the flock64 struct rather than unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
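
/*
 * Example (illustrative): per the note above, a 64-bit file lock query is
 * issued as
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * so the same code path works with 64-bit offsets on both 32-bit and
 * 64-bit hosts.
 */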
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
797 static abi_ulong target_brk;
798 static abi_ulong target_original_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
808 #define DEBUGF_BRK(message, args...)
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong new_brk)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_alloc_size;
815 
816     /* brk pointers are always untagged */
817 
818     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
819 
820     if (!new_brk) {
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
822         return target_brk;
823     }
824     if (new_brk < target_original_brk) {
825         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
826                    target_brk);
827         return target_brk;
828     }
829 
830     /* If the new brk is less than the highest page reserved to the
831      * target heap allocation, set it and we're almost done...  */
832     if (new_brk <= brk_page) {
833         /* Heap contents are initialized to zero, as for anonymous
834          * mapped pages.  */
835         if (new_brk > target_brk) {
836             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
837         }
838         target_brk = new_brk;
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
840         return target_brk;
841     }
842 
843     /* We need to allocate more memory after the brk... Note that
844      * we don't use MAP_FIXED because that will map over the top of
845      * any existing mapping (like the one with the host libc or qemu
846      * itself); instead we treat "mapped but at wrong address" as
847      * a failure and unmap again.
848      */
849     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
850     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
851                                         PROT_READ|PROT_WRITE,
852                                         MAP_ANON|MAP_PRIVATE, 0, 0));
853 
854     if (mapped_addr == brk_page) {
855         /* Heap contents are initialized to zero, as for anonymous
856          * mapped pages.  Technically the new pages are already
857          * initialized to zero since they *are* anonymous mapped
858          * pages, however we have to take care with the contents that
859          * come from the remaining part of the previous page: it may
860          * contain garbage data due to previous heap usage (grown
861          * then shrunk).  */
862         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
863 
864         target_brk = new_brk;
865         brk_page = HOST_PAGE_ALIGN(target_brk);
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
867             target_brk);
868         return target_brk;
869     } else if (mapped_addr != -1) {
870         /* Mapped but at wrong address, meaning there wasn't actually
871          * enough space for this brk.
872          */
873         target_munmap(mapped_addr, new_alloc_size);
874         mapped_addr = -1;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
876     } else {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
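
/*
 * Worked example of the growth path above, assuming 4 KiB host pages:
 * with target_brk == 0x40001234 the reserved heap ends at
 * brk_page == 0x40002000.  A brk(0x40003000) request therefore asks for
 * HOST_PAGE_ALIGN(0x40003000 - 0x40002000) == 0x1000 bytes of anonymous
 * memory at brk_page; if the kernel places the mapping exactly there,
 * target_brk becomes 0x40003000 and brk_page moves up to 0x40003000.
 */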
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
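
/*
 * Example of the fd_set repacking above: with TARGET_ABI_BITS == 32 and
 * n == 40, the guest fd_set occupies nw == 2 abi_ulongs; guest fd 35 is
 * bit 3 of word 1, so copy_from_user_fdset() sets host fd 35 when that
 * bit is set and copy_to_user_fdset() writes the result back to the same
 * bit position.
 */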
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
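
/*
 * E.g. for a guest whose TARGET_HZ is 1024 running on a HOST_HZ == 100
 * host, 250 host ticks are reported to the guest as
 * 250 * 1024 / 100 == 2560 guest ticks.
 */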
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32-bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
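    /*
     * Guest-side layout of that packed 6th argument: two abi_ulongs,
     * [0] being the guest address of the sigset and [1] its size; they
     * are unpacked into arg_sigset / arg_sigsize below.
     */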
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457         ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589         struct timespec ts, *pts;
1590 
1591         if (arg3 >= 0) {
1592             /* Convert ms to secs, ns */
1593             ts.tv_sec = arg3 / 1000;
1594             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595             pts = &ts;
1596         } else {
1597             /* A negative poll() timeout means "infinite" */
1598             pts = NULL;
1599         }
1600         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
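
/*
 * Worked example for the plain poll() branch above (illustrative values):
 * a guest timeout of 2500 ms becomes
 *     ts.tv_sec  = 2500 / 1000             = 2
 *     ts.tv_nsec = (2500 % 1000) * 1000000 = 500000000
 * while a negative timeout leaves pts == NULL, i.e. "block forever".
 */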
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
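
/*
 * For the targets special-cased in do_pipe() above, the guest's libc
 * wrapper expects both descriptors in registers rather than in memory.
 * As a hedged illustration (assuming the usual MIPS o32 convention), the
 * guest side effectively does:
 *
 *     fds[0] = v0;    // the syscall return value
 *     fds[1] = v1;    // active_tc.gpr[3], set above
 *
 * which is why the second descriptor is written into a register instead
 * of being copied out through 'pipedes'.
 */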
1646 
1647 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1648                                               abi_ulong target_addr,
1649                                               socklen_t len)
1650 {
1651     struct target_ip_mreqn *target_smreqn;
1652 
1653     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_smreqn)
1655         return -TARGET_EFAULT;
1656     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1657     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1658     if (len == sizeof(struct target_ip_mreqn))
1659         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1660     unlock_user(target_smreqn, target_addr, 0);
1661 
1662     return 0;
1663 }
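
/*
 * Informational note: the guest may pass either the short struct ip_mreq
 * (multicast address + interface address) or the longer struct ip_mreqn
 * that adds an interface index; the helper above only converts
 * imr_ifindex when the longer form was supplied, and do_setsockopt()
 * below accepts any length in between.
 */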
1664 
1665 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1666                                                abi_ulong target_addr,
1667                                                socklen_t len)
1668 {
1669     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1670     sa_family_t sa_family;
1671     struct target_sockaddr *target_saddr;
1672 
1673     if (fd_trans_target_to_host_addr(fd)) {
1674         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1675     }
1676 
1677     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1678     if (!target_saddr)
1679         return -TARGET_EFAULT;
1680 
1681     sa_family = tswap16(target_saddr->sa_family);
1682 
1683     /* Oops. The caller might send an incomplete sun_path; sun_path
1684      * must be terminated by \0 (see the manual page), but
1685      * unfortunately it is quite common to specify sockaddr_un
1686      * length as "strlen(x->sun_path)" while it should be
1687      * "strlen(...) + 1". We'll fix that here if needed.
1688      * The Linux kernel applies the same fixup.
1689      */
1690 
1691     if (sa_family == AF_UNIX) {
1692         if (len < unix_maxlen && len > 0) {
1693             char *cp = (char *)target_saddr;
1694 
1695             if (cp[len - 1] && !cp[len])
1696                 len++;
1697         }
1698         if (len > unix_maxlen)
1699             len = unix_maxlen;
1700     }
1701 
1702     memcpy(addr, target_saddr, len);
1703     addr->sa_family = sa_family;
1704     if (sa_family == AF_NETLINK) {
1705         struct sockaddr_nl *nladdr;
1706 
1707         nladdr = (struct sockaddr_nl *)addr;
1708         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1709         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1710     } else if (sa_family == AF_PACKET) {
1711         struct target_sockaddr_ll *lladdr;
1712 
1713         lladdr = (struct target_sockaddr_ll *)addr;
1714         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1715         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1716     }
1717     unlock_user(target_saddr, target_addr, 0);
1718 
1719     return 0;
1720 }
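
/*
 * Worked example of the AF_UNIX length fixup above (illustrative only):
 * a guest that binds "/tmp/sock" and passes
 *     len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock")
 * omits the trailing NUL.  If the byte just past the path is still zero
 * in the supplied buffer, len is bumped by one so the host kernel sees a
 * properly terminated sun_path.
 */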
1721 
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723                                                struct sockaddr *addr,
1724                                                socklen_t len)
1725 {
1726     struct target_sockaddr *target_saddr;
1727 
1728     if (len == 0) {
1729         return 0;
1730     }
1731     assert(addr);
1732 
1733     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734     if (!target_saddr)
1735         return -TARGET_EFAULT;
1736     memcpy(target_saddr, addr, len);
1737     if (len >= offsetof(struct target_sockaddr, sa_family) +
1738         sizeof(target_saddr->sa_family)) {
1739         target_saddr->sa_family = tswap16(addr->sa_family);
1740     }
1741     if (addr->sa_family == AF_NETLINK &&
1742         len >= sizeof(struct target_sockaddr_nl)) {
1743         struct target_sockaddr_nl *target_nl =
1744                (struct target_sockaddr_nl *)target_saddr;
1745         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747     } else if (addr->sa_family == AF_PACKET) {
1748         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751     } else if (addr->sa_family == AF_INET6 &&
1752                len >= sizeof(struct target_sockaddr_in6)) {
1753         struct target_sockaddr_in6 *target_in6 =
1754                (struct target_sockaddr_in6 *)target_saddr;
1755         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1756     }
1757     unlock_user(target_saddr, target_addr, len);
1758 
1759     return 0;
1760 }
1761 
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763                                            struct target_msghdr *target_msgh)
1764 {
1765     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766     abi_long msg_controllen;
1767     abi_ulong target_cmsg_addr;
1768     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769     socklen_t space = 0;
1770 
1771     msg_controllen = tswapal(target_msgh->msg_controllen);
1772     if (msg_controllen < sizeof (struct target_cmsghdr))
1773         goto the_end;
1774     target_cmsg_addr = tswapal(target_msgh->msg_control);
1775     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776     target_cmsg_start = target_cmsg;
1777     if (!target_cmsg)
1778         return -TARGET_EFAULT;
1779 
1780     while (cmsg && target_cmsg) {
1781         void *data = CMSG_DATA(cmsg);
1782         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783 
1784         int len = tswapal(target_cmsg->cmsg_len)
1785             - sizeof(struct target_cmsghdr);
1786 
1787         space += CMSG_SPACE(len);
1788         if (space > msgh->msg_controllen) {
1789             space -= CMSG_SPACE(len);
1790             /* This is a QEMU bug, since we allocated the payload
1791              * area ourselves (unlike overflow in host-to-target
1792              * conversion, which is just the guest giving us a buffer
1793              * that's too small). It can't happen for the payload types
1794              * we currently support; if it becomes an issue in future
1795              * we would need to improve our allocation strategy to
1796              * something more intelligent than "twice the size of the
1797              * target buffer we're reading from".
1798              */
1799             qemu_log_mask(LOG_UNIMP,
1800                           ("Unsupported ancillary data %d/%d: "
1801                            "unhandled msg size\n"),
1802                           tswap32(target_cmsg->cmsg_level),
1803                           tswap32(target_cmsg->cmsg_type));
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else {
1833             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1834                           cmsg->cmsg_level, cmsg->cmsg_type);
1835             memcpy(data, target_data, len);
1836         }
1837 
1838         cmsg = CMSG_NXTHDR(msgh, cmsg);
1839         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840                                          target_cmsg_start);
1841     }
1842     unlock_user(target_cmsg, target_cmsg_addr, 0);
1843  the_end:
1844     msgh->msg_controllen = space;
1845     return 0;
1846 }
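
/*
 * Sketch of the SCM_RIGHTS handling above for a guest sendmsg() carrying
 * three descriptors (the numbers are made up): the payload { 3, 5, 9 } is
 * copied with __get_user(), so each int is byteswapped individually when
 * the guest endianness differs from the host; the descriptor values
 * themselves pass through unchanged.
 */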
1847 
1848 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1849                                            struct msghdr *msgh)
1850 {
1851     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1852     abi_long msg_controllen;
1853     abi_ulong target_cmsg_addr;
1854     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1855     socklen_t space = 0;
1856 
1857     msg_controllen = tswapal(target_msgh->msg_controllen);
1858     if (msg_controllen < sizeof (struct target_cmsghdr))
1859         goto the_end;
1860     target_cmsg_addr = tswapal(target_msgh->msg_control);
1861     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1862     target_cmsg_start = target_cmsg;
1863     if (!target_cmsg)
1864         return -TARGET_EFAULT;
1865 
1866     while (cmsg && target_cmsg) {
1867         void *data = CMSG_DATA(cmsg);
1868         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1869 
1870         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1871         int tgt_len, tgt_space;
1872 
1873         /* We never copy a half-header but may copy half-data;
1874          * this is Linux's behaviour in put_cmsg(). Note that
1875          * truncation here is a guest problem (which we report
1876          * to the guest via the CTRUNC bit), unlike truncation
1877          * in target_to_host_cmsg, which is a QEMU bug.
1878          */
1879         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1880             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1881             break;
1882         }
1883 
1884         if (cmsg->cmsg_level == SOL_SOCKET) {
1885             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1886         } else {
1887             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1888         }
1889         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1890 
1891         /* Payload types which need a different size of payload on
1892          * the target must adjust tgt_len here.
1893          */
1894         tgt_len = len;
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SO_TIMESTAMP:
1899                 tgt_len = sizeof(struct target_timeval);
1900                 break;
1901             default:
1902                 break;
1903             }
1904             break;
1905         default:
1906             break;
1907         }
1908 
1909         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1910             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1911             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1912         }
1913 
1914         /* We must now copy-and-convert len bytes of payload
1915          * into tgt_len bytes of destination space. Bear in mind
1916          * that in both source and destination we may be dealing
1917          * with a truncated value!
1918          */
1919         switch (cmsg->cmsg_level) {
1920         case SOL_SOCKET:
1921             switch (cmsg->cmsg_type) {
1922             case SCM_RIGHTS:
1923             {
1924                 int *fd = (int *)data;
1925                 int *target_fd = (int *)target_data;
1926                 int i, numfds = tgt_len / sizeof(int);
1927 
1928                 for (i = 0; i < numfds; i++) {
1929                     __put_user(fd[i], target_fd + i);
1930                 }
1931                 break;
1932             }
1933             case SO_TIMESTAMP:
1934             {
1935                 struct timeval *tv = (struct timeval *)data;
1936                 struct target_timeval *target_tv =
1937                     (struct target_timeval *)target_data;
1938 
1939                 if (len != sizeof(struct timeval) ||
1940                     tgt_len != sizeof(struct target_timeval)) {
1941                     goto unimplemented;
1942                 }
1943 
1944                 /* copy struct timeval to target */
1945                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1946                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1947                 break;
1948             }
1949             case SCM_CREDENTIALS:
1950             {
1951                 struct ucred *cred = (struct ucred *)data;
1952                 struct target_ucred *target_cred =
1953                     (struct target_ucred *)target_data;
1954 
1955                 __put_user(cred->pid, &target_cred->pid);
1956                 __put_user(cred->uid, &target_cred->uid);
1957                 __put_user(cred->gid, &target_cred->gid);
1958                 break;
1959             }
1960             default:
1961                 goto unimplemented;
1962             }
1963             break;
1964 
1965         case SOL_IP:
1966             switch (cmsg->cmsg_type) {
1967             case IP_TTL:
1968             {
1969                 uint32_t *v = (uint32_t *)data;
1970                 uint32_t *t_int = (uint32_t *)target_data;
1971 
1972                 if (len != sizeof(uint32_t) ||
1973                     tgt_len != sizeof(uint32_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(*v, t_int);
1977                 break;
1978             }
1979             case IP_RECVERR:
1980             {
1981                 struct errhdr_t {
1982                    struct sock_extended_err ee;
1983                    struct sockaddr_in offender;
1984                 };
1985                 struct errhdr_t *errh = (struct errhdr_t *)data;
1986                 struct errhdr_t *target_errh =
1987                     (struct errhdr_t *)target_data;
1988 
1989                 if (len != sizeof(struct errhdr_t) ||
1990                     tgt_len != sizeof(struct errhdr_t)) {
1991                     goto unimplemented;
1992                 }
1993                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1994                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1995                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1996                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1997                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1998                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1999                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2000                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2001                     (void *) &errh->offender, sizeof(errh->offender));
2002                 break;
2003             }
2004             default:
2005                 goto unimplemented;
2006             }
2007             break;
2008 
2009         case SOL_IPV6:
2010             switch (cmsg->cmsg_type) {
2011             case IPV6_HOPLIMIT:
2012             {
2013                 uint32_t *v = (uint32_t *)data;
2014                 uint32_t *t_int = (uint32_t *)target_data;
2015 
2016                 if (len != sizeof(uint32_t) ||
2017                     tgt_len != sizeof(uint32_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(*v, t_int);
2021                 break;
2022             }
2023             case IPV6_RECVERR:
2024             {
2025                 struct errhdr6_t {
2026                    struct sock_extended_err ee;
2027                    struct sockaddr_in6 offender;
2028                 };
2029                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2030                 struct errhdr6_t *target_errh =
2031                     (struct errhdr6_t *)target_data;
2032 
2033                 if (len != sizeof(struct errhdr6_t) ||
2034                     tgt_len != sizeof(struct errhdr6_t)) {
2035                     goto unimplemented;
2036                 }
2037                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2038                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2039                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2040                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2041                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2042                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2043                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2044                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2045                     (void *) &errh->offender, sizeof(errh->offender));
2046                 break;
2047             }
2048             default:
2049                 goto unimplemented;
2050             }
2051             break;
2052 
2053         default:
2054         unimplemented:
2055             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2056                           cmsg->cmsg_level, cmsg->cmsg_type);
2057             memcpy(target_data, data, MIN(len, tgt_len));
2058             if (tgt_len > len) {
2059                 memset(target_data + len, 0, tgt_len - len);
2060             }
2061         }
2062 
2063         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2064         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2065         if (msg_controllen < tgt_space) {
2066             tgt_space = msg_controllen;
2067         }
2068         msg_controllen -= tgt_space;
2069         space += tgt_space;
2070         cmsg = CMSG_NXTHDR(msgh, cmsg);
2071         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2072                                          target_cmsg_start);
2073     }
2074     unlock_user(target_cmsg, target_cmsg_addr, space);
2075  the_end:
2076     target_msgh->msg_controllen = tswapal(space);
2077     return 0;
2078 }
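
/*
 * Illustrative truncation behaviour for the conversion above: if the
 * guest's control buffer has too little room left for a host SCM_RIGHTS
 * message, tgt_len is clamped to whatever fits after the target cmsghdr,
 * MSG_CTRUNC is set in the target msg_flags, and only as many descriptors
 * as still fit are copied out.
 */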
2079 
2080 /* do_setsockopt() Must return target values and target errnos. */
2081 static abi_long do_setsockopt(int sockfd, int level, int optname,
2082                               abi_ulong optval_addr, socklen_t optlen)
2083 {
2084     abi_long ret;
2085     int val;
2086     struct ip_mreqn *ip_mreq;
2087     struct ip_mreq_source *ip_mreq_source;
2088 
2089     switch(level) {
2090     case SOL_TCP:
2091     case SOL_UDP:
2092         /* TCP and UDP options all take an 'int' value.  */
2093         if (optlen < sizeof(uint32_t))
2094             return -TARGET_EINVAL;
2095 
2096         if (get_user_u32(val, optval_addr))
2097             return -TARGET_EFAULT;
2098         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2099         break;
2100     case SOL_IP:
2101         switch(optname) {
2102         case IP_TOS:
2103         case IP_TTL:
2104         case IP_HDRINCL:
2105         case IP_ROUTER_ALERT:
2106         case IP_RECVOPTS:
2107         case IP_RETOPTS:
2108         case IP_PKTINFO:
2109         case IP_MTU_DISCOVER:
2110         case IP_RECVERR:
2111         case IP_RECVTTL:
2112         case IP_RECVTOS:
2113 #ifdef IP_FREEBIND
2114         case IP_FREEBIND:
2115 #endif
2116         case IP_MULTICAST_TTL:
2117         case IP_MULTICAST_LOOP:
2118             val = 0;
2119             if (optlen >= sizeof(uint32_t)) {
2120                 if (get_user_u32(val, optval_addr))
2121                     return -TARGET_EFAULT;
2122             } else if (optlen >= 1) {
2123                 if (get_user_u8(val, optval_addr))
2124                     return -TARGET_EFAULT;
2125             }
2126             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2127             break;
2128         case IP_ADD_MEMBERSHIP:
2129         case IP_DROP_MEMBERSHIP:
2130             if (optlen < sizeof (struct target_ip_mreq) ||
2131                 optlen > sizeof (struct target_ip_mreqn))
2132                 return -TARGET_EINVAL;
2133 
2134             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2135             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2137             break;
2138 
2139         case IP_BLOCK_SOURCE:
2140         case IP_UNBLOCK_SOURCE:
2141         case IP_ADD_SOURCE_MEMBERSHIP:
2142         case IP_DROP_SOURCE_MEMBERSHIP:
2143             if (optlen != sizeof (struct target_ip_mreq_source))
2144                 return -TARGET_EINVAL;
2145 
2146             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2147             if (!ip_mreq_source) {
2148                 return -TARGET_EFAULT;
2149             }
2150             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2151             unlock_user(ip_mreq_source, optval_addr, 0);
2152             break;
2153 
2154         default:
2155             goto unimplemented;
2156         }
2157         break;
2158     case SOL_IPV6:
2159         switch (optname) {
2160         case IPV6_MTU_DISCOVER:
2161         case IPV6_MTU:
2162         case IPV6_V6ONLY:
2163         case IPV6_RECVPKTINFO:
2164         case IPV6_UNICAST_HOPS:
2165         case IPV6_MULTICAST_HOPS:
2166         case IPV6_MULTICAST_LOOP:
2167         case IPV6_RECVERR:
2168         case IPV6_RECVHOPLIMIT:
2169         case IPV6_2292HOPLIMIT:
2170         case IPV6_CHECKSUM:
2171         case IPV6_ADDRFORM:
2172         case IPV6_2292PKTINFO:
2173         case IPV6_RECVTCLASS:
2174         case IPV6_RECVRTHDR:
2175         case IPV6_2292RTHDR:
2176         case IPV6_RECVHOPOPTS:
2177         case IPV6_2292HOPOPTS:
2178         case IPV6_RECVDSTOPTS:
2179         case IPV6_2292DSTOPTS:
2180         case IPV6_TCLASS:
2181         case IPV6_ADDR_PREFERENCES:
2182 #ifdef IPV6_RECVPATHMTU
2183         case IPV6_RECVPATHMTU:
2184 #endif
2185 #ifdef IPV6_TRANSPARENT
2186         case IPV6_TRANSPARENT:
2187 #endif
2188 #ifdef IPV6_FREEBIND
2189         case IPV6_FREEBIND:
2190 #endif
2191 #ifdef IPV6_RECVORIGDSTADDR
2192         case IPV6_RECVORIGDSTADDR:
2193 #endif
2194             val = 0;
2195             if (optlen < sizeof(uint32_t)) {
2196                 return -TARGET_EINVAL;
2197             }
2198             if (get_user_u32(val, optval_addr)) {
2199                 return -TARGET_EFAULT;
2200             }
2201             ret = get_errno(setsockopt(sockfd, level, optname,
2202                                        &val, sizeof(val)));
2203             break;
2204         case IPV6_PKTINFO:
2205         {
2206             struct in6_pktinfo pki;
2207 
2208             if (optlen < sizeof(pki)) {
2209                 return -TARGET_EINVAL;
2210             }
2211 
2212             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2213                 return -TARGET_EFAULT;
2214             }
2215 
2216             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2217 
2218             ret = get_errno(setsockopt(sockfd, level, optname,
2219                                        &pki, sizeof(pki)));
2220             break;
2221         }
2222         case IPV6_ADD_MEMBERSHIP:
2223         case IPV6_DROP_MEMBERSHIP:
2224         {
2225             struct ipv6_mreq ipv6mreq;
2226 
2227             if (optlen < sizeof(ipv6mreq)) {
2228                 return -TARGET_EINVAL;
2229             }
2230 
2231             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &ipv6mreq, sizeof(ipv6mreq)));
2239             break;
2240         }
2241         default:
2242             goto unimplemented;
2243         }
2244         break;
2245     case SOL_ICMPV6:
2246         switch (optname) {
2247         case ICMPV6_FILTER:
2248         {
2249             struct icmp6_filter icmp6f;
2250 
2251             if (optlen > sizeof(icmp6f)) {
2252                 optlen = sizeof(icmp6f);
2253             }
2254 
2255             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             for (val = 0; val < 8; val++) {
2260                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2261             }
2262 
2263             ret = get_errno(setsockopt(sockfd, level, optname,
2264                                        &icmp6f, optlen));
2265             break;
2266         }
2267         default:
2268             goto unimplemented;
2269         }
2270         break;
2271     case SOL_RAW:
2272         switch (optname) {
2273         case ICMP_FILTER:
2274         case IPV6_CHECKSUM:
2275             /* These take a u32 value. */
2276             if (optlen < sizeof(uint32_t)) {
2277                 return -TARGET_EINVAL;
2278             }
2279 
2280             if (get_user_u32(val, optval_addr)) {
2281                 return -TARGET_EFAULT;
2282             }
2283             ret = get_errno(setsockopt(sockfd, level, optname,
2284                                        &val, sizeof(val)));
2285             break;
2286 
2287         default:
2288             goto unimplemented;
2289         }
2290         break;
2291 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2292     case SOL_ALG:
2293         switch (optname) {
2294         case ALG_SET_KEY:
2295         {
2296             char *alg_key = g_malloc(optlen);
2297 
2298             if (!alg_key) {
2299                 return -TARGET_ENOMEM;
2300             }
2301             if (copy_from_user(alg_key, optval_addr, optlen)) {
2302                 g_free(alg_key);
2303                 return -TARGET_EFAULT;
2304             }
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        alg_key, optlen));
2307             g_free(alg_key);
2308             break;
2309         }
2310         case ALG_SET_AEAD_AUTHSIZE:
2311         {
2312             ret = get_errno(setsockopt(sockfd, level, optname,
2313                                        NULL, optlen));
2314             break;
2315         }
2316         default:
2317             goto unimplemented;
2318         }
2319         break;
2320 #endif
2321     case TARGET_SOL_SOCKET:
2322         switch (optname) {
2323         case TARGET_SO_RCVTIMEO:
2324         {
2325                 struct timeval tv;
2326 
2327                 optname = SO_RCVTIMEO;
2328 
2329 set_timeout:
2330                 if (optlen != sizeof(struct target_timeval)) {
2331                     return -TARGET_EINVAL;
2332                 }
2333 
2334                 if (copy_from_user_timeval(&tv, optval_addr)) {
2335                     return -TARGET_EFAULT;
2336                 }
2337 
2338                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2339                                 &tv, sizeof(tv)));
2340                 return ret;
2341         }
2342         case TARGET_SO_SNDTIMEO:
2343                 optname = SO_SNDTIMEO;
2344                 goto set_timeout;
2345         case TARGET_SO_ATTACH_FILTER:
2346         {
2347                 struct target_sock_fprog *tfprog;
2348                 struct target_sock_filter *tfilter;
2349                 struct sock_fprog fprog;
2350                 struct sock_filter *filter;
2351                 int i;
2352 
2353                 if (optlen != sizeof(*tfprog)) {
2354                     return -TARGET_EINVAL;
2355                 }
2356                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2357                     return -TARGET_EFAULT;
2358                 }
2359                 if (!lock_user_struct(VERIFY_READ, tfilter,
2360                                       tswapal(tfprog->filter), 0)) {
2361                     unlock_user_struct(tfprog, optval_addr, 1);
2362                     return -TARGET_EFAULT;
2363                 }
2364 
2365                 fprog.len = tswap16(tfprog->len);
2366                 filter = g_try_new(struct sock_filter, fprog.len);
2367                 if (filter == NULL) {
2368                     unlock_user_struct(tfilter, tfprog->filter, 1);
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_ENOMEM;
2371                 }
2372                 for (i = 0; i < fprog.len; i++) {
2373                     filter[i].code = tswap16(tfilter[i].code);
2374                     filter[i].jt = tfilter[i].jt;
2375                     filter[i].jf = tfilter[i].jf;
2376                     filter[i].k = tswap32(tfilter[i].k);
2377                 }
2378                 fprog.filter = filter;
2379 
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2381                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2382                 g_free(filter);
2383 
2384                 unlock_user_struct(tfilter, tfprog->filter, 1);
2385                 unlock_user_struct(tfprog, optval_addr, 1);
2386                 return ret;
2387         }
2388         case TARGET_SO_BINDTODEVICE:
2389         {
2390                 char *dev_ifname, *addr_ifname;
2391 
2392                 if (optlen > IFNAMSIZ - 1) {
2393                     optlen = IFNAMSIZ - 1;
2394                 }
2395                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2396                 if (!dev_ifname) {
2397                     return -TARGET_EFAULT;
2398                 }
2399                 optname = SO_BINDTODEVICE;
2400                 addr_ifname = alloca(IFNAMSIZ);
2401                 memcpy(addr_ifname, dev_ifname, optlen);
2402                 addr_ifname[optlen] = 0;
2403                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2404                                            addr_ifname, optlen));
2405                 unlock_user(dev_ifname, optval_addr, 0);
2406                 return ret;
2407         }
2408         case TARGET_SO_LINGER:
2409         {
2410                 struct linger lg;
2411                 struct target_linger *tlg;
2412 
2413                 if (optlen != sizeof(struct target_linger)) {
2414                     return -TARGET_EINVAL;
2415                 }
2416                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2417                     return -TARGET_EFAULT;
2418                 }
2419                 __get_user(lg.l_onoff, &tlg->l_onoff);
2420                 __get_user(lg.l_linger, &tlg->l_linger);
2421                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2422                                 &lg, sizeof(lg)));
2423                 unlock_user_struct(tlg, optval_addr, 0);
2424                 return ret;
2425         }
2426         /* Options with 'int' argument.  */
2427         case TARGET_SO_DEBUG:
2428                 optname = SO_DEBUG;
2429                 break;
2430         case TARGET_SO_REUSEADDR:
2431                 optname = SO_REUSEADDR;
2432                 break;
2433 #ifdef SO_REUSEPORT
2434         case TARGET_SO_REUSEPORT:
2435                 optname = SO_REUSEPORT;
2436                 break;
2437 #endif
2438         case TARGET_SO_TYPE:
2439                 optname = SO_TYPE;
2440                 break;
2441         case TARGET_SO_ERROR:
2442                 optname = SO_ERROR;
2443                 break;
2444         case TARGET_SO_DONTROUTE:
2445                 optname = SO_DONTROUTE;
2446                 break;
2447         case TARGET_SO_BROADCAST:
2448                 optname = SO_BROADCAST;
2449                 break;
2450         case TARGET_SO_SNDBUF:
2451                 optname = SO_SNDBUF;
2452                 break;
2453         case TARGET_SO_SNDBUFFORCE:
2454                 optname = SO_SNDBUFFORCE;
2455                 break;
2456         case TARGET_SO_RCVBUF:
2457                 optname = SO_RCVBUF;
2458                 break;
2459         case TARGET_SO_RCVBUFFORCE:
2460                 optname = SO_RCVBUFFORCE;
2461                 break;
2462         case TARGET_SO_KEEPALIVE:
2463                 optname = SO_KEEPALIVE;
2464                 break;
2465         case TARGET_SO_OOBINLINE:
2466                 optname = SO_OOBINLINE;
2467                 break;
2468         case TARGET_SO_NO_CHECK:
2469                 optname = SO_NO_CHECK;
2470                 break;
2471         case TARGET_SO_PRIORITY:
2472                 optname = SO_PRIORITY;
2473                 break;
2474 #ifdef SO_BSDCOMPAT
2475         case TARGET_SO_BSDCOMPAT:
2476                 optname = SO_BSDCOMPAT;
2477                 break;
2478 #endif
2479         case TARGET_SO_PASSCRED:
2480                 optname = SO_PASSCRED;
2481                 break;
2482         case TARGET_SO_PASSSEC:
2483                 optname = SO_PASSSEC;
2484                 break;
2485         case TARGET_SO_TIMESTAMP:
2486                 optname = SO_TIMESTAMP;
2487                 break;
2488         case TARGET_SO_RCVLOWAT:
2489                 optname = SO_RCVLOWAT;
2490                 break;
2491         default:
2492             goto unimplemented;
2493         }
2494         if (optlen < sizeof(uint32_t))
2495             return -TARGET_EINVAL;
2496 
2497         if (get_user_u32(val, optval_addr))
2498             return -TARGET_EFAULT;
2499         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2500         break;
2501 #ifdef SOL_NETLINK
2502     case SOL_NETLINK:
2503         switch (optname) {
2504         case NETLINK_PKTINFO:
2505         case NETLINK_ADD_MEMBERSHIP:
2506         case NETLINK_DROP_MEMBERSHIP:
2507         case NETLINK_BROADCAST_ERROR:
2508         case NETLINK_NO_ENOBUFS:
2509 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2510         case NETLINK_LISTEN_ALL_NSID:
2511         case NETLINK_CAP_ACK:
2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2514         case NETLINK_EXT_ACK:
2515 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2517         case NETLINK_GET_STRICT_CHK:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2519             break;
2520         default:
2521             goto unimplemented;
2522         }
2523         val = 0;
2524         if (optlen < sizeof(uint32_t)) {
2525             return -TARGET_EINVAL;
2526         }
2527         if (get_user_u32(val, optval_addr)) {
2528             return -TARGET_EFAULT;
2529         }
2530         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2531                                    sizeof(val)));
2532         break;
2533 #endif /* SOL_NETLINK */
2534     default:
2535     unimplemented:
2536         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2537                       level, optname);
2538         ret = -TARGET_ENOPROTOOPT;
2539     }
2540     return ret;
2541 }
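
/*
 * Note on the TARGET_SOL_SOCKET handling above: the numeric values of the
 * SO_* option names differ between guest architectures, so every
 * TARGET_SO_* constant is translated to the host's SO_* value before the
 * host setsockopt() call.  As a hedged example, a guest ABI where
 * SO_REUSEADDR does not match the host's value still ends up issuing
 * setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, ...) on the host side.
 */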
2542 
2543 /* do_getsockopt() Must return target values and target errnos. */
2544 static abi_long do_getsockopt(int sockfd, int level, int optname,
2545                               abi_ulong optval_addr, abi_ulong optlen)
2546 {
2547     abi_long ret;
2548     int len, val;
2549     socklen_t lv;
2550 
2551     switch(level) {
2552     case TARGET_SOL_SOCKET:
2553         level = SOL_SOCKET;
2554         switch (optname) {
2555         /* These don't just return a single integer */
2556         case TARGET_SO_PEERNAME:
2557             goto unimplemented;
2558         case TARGET_SO_RCVTIMEO: {
2559             struct timeval tv;
2560             socklen_t tvlen;
2561 
2562             optname = SO_RCVTIMEO;
2563 
2564 get_timeout:
2565             if (get_user_u32(len, optlen)) {
2566                 return -TARGET_EFAULT;
2567             }
2568             if (len < 0) {
2569                 return -TARGET_EINVAL;
2570             }
2571 
2572             tvlen = sizeof(tv);
2573             ret = get_errno(getsockopt(sockfd, level, optname,
2574                                        &tv, &tvlen));
2575             if (ret < 0) {
2576                 return ret;
2577             }
2578             if (len > sizeof(struct target_timeval)) {
2579                 len = sizeof(struct target_timeval);
2580             }
2581             if (copy_to_user_timeval(optval_addr, &tv)) {
2582                 return -TARGET_EFAULT;
2583             }
2584             if (put_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             break;
2588         }
2589         case TARGET_SO_SNDTIMEO:
2590             optname = SO_SNDTIMEO;
2591             goto get_timeout;
2592         case TARGET_SO_PEERCRED: {
2593             struct ucred cr;
2594             socklen_t crlen;
2595             struct target_ucred *tcr;
2596 
2597             if (get_user_u32(len, optlen)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             if (len < 0) {
2601                 return -TARGET_EINVAL;
2602             }
2603 
2604             crlen = sizeof(cr);
2605             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2606                                        &cr, &crlen));
2607             if (ret < 0) {
2608                 return ret;
2609             }
2610             if (len > crlen) {
2611                 len = crlen;
2612             }
2613             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             __put_user(cr.pid, &tcr->pid);
2617             __put_user(cr.uid, &tcr->uid);
2618             __put_user(cr.gid, &tcr->gid);
2619             unlock_user_struct(tcr, optval_addr, 1);
2620             if (put_user_u32(len, optlen)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             break;
2624         }
2625         case TARGET_SO_PEERSEC: {
2626             char *name;
2627 
2628             if (get_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             if (len < 0) {
2632                 return -TARGET_EINVAL;
2633             }
2634             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2635             if (!name) {
2636                 return -TARGET_EFAULT;
2637             }
2638             lv = len;
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2640                                        name, &lv));
2641             if (put_user_u32(lv, optlen)) {
2642                 ret = -TARGET_EFAULT;
2643             }
2644             unlock_user(name, optval_addr, lv);
2645             break;
2646         }
2647         case TARGET_SO_LINGER:
2648         {
2649             struct linger lg;
2650             socklen_t lglen;
2651             struct target_linger *tlg;
2652 
2653             if (get_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             if (len < 0) {
2657                 return -TARGET_EINVAL;
2658             }
2659 
2660             lglen = sizeof(lg);
2661             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2662                                        &lg, &lglen));
2663             if (ret < 0) {
2664                 return ret;
2665             }
2666             if (len > lglen) {
2667                 len = lglen;
2668             }
2669             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2670                 return -TARGET_EFAULT;
2671             }
2672             __put_user(lg.l_onoff, &tlg->l_onoff);
2673             __put_user(lg.l_linger, &tlg->l_linger);
2674             unlock_user_struct(tlg, optval_addr, 1);
2675             if (put_user_u32(len, optlen)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             break;
2679         }
2680         /* Options with 'int' argument.  */
2681         case TARGET_SO_DEBUG:
2682             optname = SO_DEBUG;
2683             goto int_case;
2684         case TARGET_SO_REUSEADDR:
2685             optname = SO_REUSEADDR;
2686             goto int_case;
2687 #ifdef SO_REUSEPORT
2688         case TARGET_SO_REUSEPORT:
2689             optname = SO_REUSEPORT;
2690             goto int_case;
2691 #endif
2692         case TARGET_SO_TYPE:
2693             optname = SO_TYPE;
2694             goto int_case;
2695         case TARGET_SO_ERROR:
2696             optname = SO_ERROR;
2697             goto int_case;
2698         case TARGET_SO_DONTROUTE:
2699             optname = SO_DONTROUTE;
2700             goto int_case;
2701         case TARGET_SO_BROADCAST:
2702             optname = SO_BROADCAST;
2703             goto int_case;
2704         case TARGET_SO_SNDBUF:
2705             optname = SO_SNDBUF;
2706             goto int_case;
2707         case TARGET_SO_RCVBUF:
2708             optname = SO_RCVBUF;
2709             goto int_case;
2710         case TARGET_SO_KEEPALIVE:
2711             optname = SO_KEEPALIVE;
2712             goto int_case;
2713         case TARGET_SO_OOBINLINE:
2714             optname = SO_OOBINLINE;
2715             goto int_case;
2716         case TARGET_SO_NO_CHECK:
2717             optname = SO_NO_CHECK;
2718             goto int_case;
2719         case TARGET_SO_PRIORITY:
2720             optname = SO_PRIORITY;
2721             goto int_case;
2722 #ifdef SO_BSDCOMPAT
2723         case TARGET_SO_BSDCOMPAT:
2724             optname = SO_BSDCOMPAT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_PASSCRED:
2728             optname = SO_PASSCRED;
2729             goto int_case;
2730         case TARGET_SO_TIMESTAMP:
2731             optname = SO_TIMESTAMP;
2732             goto int_case;
2733         case TARGET_SO_RCVLOWAT:
2734             optname = SO_RCVLOWAT;
2735             goto int_case;
2736         case TARGET_SO_ACCEPTCONN:
2737             optname = SO_ACCEPTCONN;
2738             goto int_case;
2739         case TARGET_SO_PROTOCOL:
2740             optname = SO_PROTOCOL;
2741             goto int_case;
2742         case TARGET_SO_DOMAIN:
2743             optname = SO_DOMAIN;
2744             goto int_case;
2745         default:
2746             goto int_case;
2747         }
2748         break;
2749     case SOL_TCP:
2750     case SOL_UDP:
2751         /* TCP and UDP options all take an 'int' value.  */
2752     int_case:
2753         if (get_user_u32(len, optlen))
2754             return -TARGET_EFAULT;
2755         if (len < 0)
2756             return -TARGET_EINVAL;
2757         lv = sizeof(lv);
2758         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2759         if (ret < 0)
2760             return ret;
2761         if (optname == SO_TYPE) {
2762             val = host_to_target_sock_type(val);
2763         }
2764         if (len > lv)
2765             len = lv;
2766         if (len == 4) {
2767             if (put_user_u32(val, optval_addr))
2768                 return -TARGET_EFAULT;
2769         } else {
2770             if (put_user_u8(val, optval_addr))
2771                 return -TARGET_EFAULT;
2772         }
2773         if (put_user_u32(len, optlen))
2774             return -TARGET_EFAULT;
2775         break;
2776     case SOL_IP:
2777         switch(optname) {
2778         case IP_TOS:
2779         case IP_TTL:
2780         case IP_HDRINCL:
2781         case IP_ROUTER_ALERT:
2782         case IP_RECVOPTS:
2783         case IP_RETOPTS:
2784         case IP_PKTINFO:
2785         case IP_MTU_DISCOVER:
2786         case IP_RECVERR:
2787         case IP_RECVTOS:
2788 #ifdef IP_FREEBIND
2789         case IP_FREEBIND:
2790 #endif
2791         case IP_MULTICAST_TTL:
2792         case IP_MULTICAST_LOOP:
2793             if (get_user_u32(len, optlen))
2794                 return -TARGET_EFAULT;
2795             if (len < 0)
2796                 return -TARGET_EINVAL;
2797             lv = sizeof(lv);
2798             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2799             if (ret < 0)
2800                 return ret;
2801             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2802                 len = 1;
2803                 if (put_user_u32(len, optlen)
2804                     || put_user_u8(val, optval_addr))
2805                     return -TARGET_EFAULT;
2806             } else {
2807                 if (len > sizeof(int))
2808                     len = sizeof(int);
2809                 if (put_user_u32(len, optlen)
2810                     || put_user_u32(val, optval_addr))
2811                     return -TARGET_EFAULT;
2812             }
2813             break;
2814         default:
2815             ret = -TARGET_ENOPROTOOPT;
2816             break;
2817         }
2818         break;
2819     case SOL_IPV6:
2820         switch (optname) {
2821         case IPV6_MTU_DISCOVER:
2822         case IPV6_MTU:
2823         case IPV6_V6ONLY:
2824         case IPV6_RECVPKTINFO:
2825         case IPV6_UNICAST_HOPS:
2826         case IPV6_MULTICAST_HOPS:
2827         case IPV6_MULTICAST_LOOP:
2828         case IPV6_RECVERR:
2829         case IPV6_RECVHOPLIMIT:
2830         case IPV6_2292HOPLIMIT:
2831         case IPV6_CHECKSUM:
2832         case IPV6_ADDRFORM:
2833         case IPV6_2292PKTINFO:
2834         case IPV6_RECVTCLASS:
2835         case IPV6_RECVRTHDR:
2836         case IPV6_2292RTHDR:
2837         case IPV6_RECVHOPOPTS:
2838         case IPV6_2292HOPOPTS:
2839         case IPV6_RECVDSTOPTS:
2840         case IPV6_2292DSTOPTS:
2841         case IPV6_TCLASS:
2842         case IPV6_ADDR_PREFERENCES:
2843 #ifdef IPV6_RECVPATHMTU
2844         case IPV6_RECVPATHMTU:
2845 #endif
2846 #ifdef IPV6_TRANSPARENT
2847         case IPV6_TRANSPARENT:
2848 #endif
2849 #ifdef IPV6_FREEBIND
2850         case IPV6_FREEBIND:
2851 #endif
2852 #ifdef IPV6_RECVORIGDSTADDR
2853         case IPV6_RECVORIGDSTADDR:
2854 #endif
2855             if (get_user_u32(len, optlen))
2856                 return -TARGET_EFAULT;
2857             if (len < 0)
2858                 return -TARGET_EINVAL;
2859             lv = sizeof(lv);
2860             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2861             if (ret < 0)
2862                 return ret;
2863             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2864                 len = 1;
2865                 if (put_user_u32(len, optlen)
2866                     || put_user_u8(val, optval_addr))
2867                     return -TARGET_EFAULT;
2868             } else {
2869                 if (len > sizeof(int))
2870                     len = sizeof(int);
2871                 if (put_user_u32(len, optlen)
2872                     || put_user_u32(val, optval_addr))
2873                     return -TARGET_EFAULT;
2874             }
2875             break;
2876         default:
2877             ret = -TARGET_ENOPROTOOPT;
2878             break;
2879         }
2880         break;
2881 #ifdef SOL_NETLINK
2882     case SOL_NETLINK:
2883         switch (optname) {
2884         case NETLINK_PKTINFO:
2885         case NETLINK_BROADCAST_ERROR:
2886         case NETLINK_NO_ENOBUFS:
2887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2888         case NETLINK_LISTEN_ALL_NSID:
2889         case NETLINK_CAP_ACK:
2890 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2891 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2892         case NETLINK_EXT_ACK:
2893 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2894 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2895         case NETLINK_GET_STRICT_CHK:
2896 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2897             if (get_user_u32(len, optlen)) {
2898                 return -TARGET_EFAULT;
2899             }
2900             if (len != sizeof(val)) {
2901                 return -TARGET_EINVAL;
2902             }
2903             lv = len;
2904             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2905             if (ret < 0) {
2906                 return ret;
2907             }
2908             if (put_user_u32(lv, optlen)
2909                 || put_user_u32(val, optval_addr)) {
2910                 return -TARGET_EFAULT;
2911             }
2912             break;
2913 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2914         case NETLINK_LIST_MEMBERSHIPS:
2915         {
2916             uint32_t *results;
2917             int i;
2918             if (get_user_u32(len, optlen)) {
2919                 return -TARGET_EFAULT;
2920             }
2921             if (len < 0) {
2922                 return -TARGET_EINVAL;
2923             }
2924             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2925             if (!results && len > 0) {
2926                 return -TARGET_EFAULT;
2927             }
2928             lv = len;
2929             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2930             if (ret < 0) {
2931                 unlock_user(results, optval_addr, 0);
2932                 return ret;
2933             }
2934             /* Swap host endianness to target endianness. */
2935             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2936                 results[i] = tswap32(results[i]);
2937             }
2938             if (put_user_u32(lv, optlen)) {
2939                 return -TARGET_EFAULT;
2940             }
2941             unlock_user(results, optval_addr, 0);
2942             break;
2943         }
2944 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2945         default:
2946             goto unimplemented;
2947         }
2948         break;
2949 #endif /* SOL_NETLINK */
2950     default:
2951     unimplemented:
2952         qemu_log_mask(LOG_UNIMP,
2953                       "getsockopt level=%d optname=%d not yet supported\n",
2954                       level, optname);
2955         ret = -TARGET_EOPNOTSUPP;
2956         break;
2957     }
2958     return ret;
2959 }
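
/*
 * Sketch of the int_case path above (illustrative): a guest calling
 * getsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, &len) with len == 1 gets
 * a single byte written back, while len >= 4 is clamped to 4 and gets the
 * full 32-bit value; either way the resulting length is stored back
 * through optlen.
 */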
2960 
2961 /* Convert a target low/high pair representing a file offset into the host
2962  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2963  * as the kernel doesn't handle them either.
2964  */
2965 static void target_to_host_low_high(abi_ulong tlow,
2966                                     abi_ulong thigh,
2967                                     unsigned long *hlow,
2968                                     unsigned long *hhigh)
2969 {
2970     uint64_t off = tlow |
2971         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2972         TARGET_LONG_BITS / 2;
2973 
2974     *hlow = off;
2975     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2976 }
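
/*
 * Worked example (assuming a 32-bit target): for tlow = 0x89abcdef and
 * thigh = 0x01234567 the combined offset is 0x0123456789abcdef.  On a
 * 64-bit host this yields *hlow = 0x0123456789abcdef and *hhigh = 0,
 * while a 32-bit host gets it split back into *hlow = 0x89abcdef and
 * *hhigh = 0x01234567.
 */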
2977 
2978 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2979                                 abi_ulong count, int copy)
2980 {
2981     struct target_iovec *target_vec;
2982     struct iovec *vec;
2983     abi_ulong total_len, max_len;
2984     int i;
2985     int err = 0;
2986     bool bad_address = false;
2987 
2988     if (count == 0) {
2989         errno = 0;
2990         return NULL;
2991     }
2992     if (count > IOV_MAX) {
2993         errno = EINVAL;
2994         return NULL;
2995     }
2996 
2997     vec = g_try_new0(struct iovec, count);
2998     if (vec == NULL) {
2999         errno = ENOMEM;
3000         return NULL;
3001     }
3002 
3003     target_vec = lock_user(VERIFY_READ, target_addr,
3004                            count * sizeof(struct target_iovec), 1);
3005     if (target_vec == NULL) {
3006         err = EFAULT;
3007         goto fail2;
3008     }
3009 
3010     /* ??? If host page size > target page size, this will result in a
3011        value larger than what we can actually support.  */
3012     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3013     total_len = 0;
3014 
3015     for (i = 0; i < count; i++) {
3016         abi_ulong base = tswapal(target_vec[i].iov_base);
3017         abi_long len = tswapal(target_vec[i].iov_len);
3018 
3019         if (len < 0) {
3020             err = EINVAL;
3021             goto fail;
3022         } else if (len == 0) {
3023             /* An entry with zero length is ignored; its pointer is not locked.  */
3024             vec[i].iov_base = 0;
3025         } else {
3026             vec[i].iov_base = lock_user(type, base, len, copy);
3027             /* If the first buffer pointer is bad, this is a fault.  But
3028              * subsequent bad buffers will result in a partial write; this
3029              * is realized by filling the vector with null pointers and
3030              * zero lengths. */
3031             if (!vec[i].iov_base) {
3032                 if (i == 0) {
3033                     err = EFAULT;
3034                     goto fail;
3035                 } else {
3036                     bad_address = true;
3037                 }
3038             }
3039             if (bad_address) {
3040                 len = 0;
3041             }
3042             if (len > max_len - total_len) {
3043                 len = max_len - total_len;
3044             }
3045         }
3046         vec[i].iov_len = len;
3047         total_len += len;
3048     }
3049 
3050     unlock_user(target_vec, target_addr, 0);
3051     return vec;
3052 
3053  fail:
3054     while (--i >= 0) {
3055         if (tswapal(target_vec[i].iov_len) > 0) {
3056             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3057         }
3058     }
3059     unlock_user(target_vec, target_addr, 0);
3060  fail2:
3061     g_free(vec);
3062     errno = err;
3063     return NULL;
3064 }
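
     /*
      * Illustrative usage sketch (not part of the original source): callers
      * report lock_iovec() failure via errno rather than the return value and
      * must pair each successful call with unlock_iovec(), roughly:
      *
      *     struct iovec *vec = lock_iovec(VERIFY_READ, target_addr, count, 1);
      *     if (vec == NULL) {
      *         return -host_to_target_errno(errno);
      *     }
      *     ret = get_errno(safe_writev(fd, vec, count));
      *     unlock_iovec(vec, target_addr, count, 0);
      */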
3065 
3066 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3067                          abi_ulong count, int copy)
3068 {
3069     struct target_iovec *target_vec;
3070     int i;
3071 
3072     target_vec = lock_user(VERIFY_READ, target_addr,
3073                            count * sizeof(struct target_iovec), 1);
3074     if (target_vec) {
3075         for (i = 0; i < count; i++) {
3076             abi_ulong base = tswapal(target_vec[i].iov_base);
3077             abi_long len = tswapal(target_vec[i].iov_len);
3078             if (len < 0) {
3079                 break;
3080             }
3081             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3082         }
3083         unlock_user(target_vec, target_addr, 0);
3084     }
3085 
3086     g_free(vec);
3087 }
3088 
3089 static inline int target_to_host_sock_type(int *type)
3090 {
3091     int host_type = 0;
3092     int target_type = *type;
3093 
3094     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3095     case TARGET_SOCK_DGRAM:
3096         host_type = SOCK_DGRAM;
3097         break;
3098     case TARGET_SOCK_STREAM:
3099         host_type = SOCK_STREAM;
3100         break;
3101     default:
3102         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3103         break;
3104     }
3105     if (target_type & TARGET_SOCK_CLOEXEC) {
3106 #if defined(SOCK_CLOEXEC)
3107         host_type |= SOCK_CLOEXEC;
3108 #else
3109         return -TARGET_EINVAL;
3110 #endif
3111     }
3112     if (target_type & TARGET_SOCK_NONBLOCK) {
3113 #if defined(SOCK_NONBLOCK)
3114         host_type |= SOCK_NONBLOCK;
3115 #elif !defined(O_NONBLOCK)
3116         return -TARGET_EINVAL;
3117 #endif
3118     }
3119     *type = host_type;
3120     return 0;
3121 }
3122 
3123 /* Try to emulate socket type flags after socket creation.  */
3124 static int sock_flags_fixup(int fd, int target_type)
3125 {
3126 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3127     if (target_type & TARGET_SOCK_NONBLOCK) {
3128         int flags = fcntl(fd, F_GETFL);
3129         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3130             close(fd);
3131             return -TARGET_EINVAL;
3132         }
3133     }
3134 #endif
3135     return fd;
3136 }
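
     /*
      * Editorial note: when the host lacks SOCK_NONBLOCK but has O_NONBLOCK,
      * target_to_host_sock_type() above silently drops the flag and
      * sock_flags_fixup() re-applies it with fcntl(F_SETFL) once the socket
      * exists.  There is no such fallback for SOCK_CLOEXEC, which instead
      * fails with -TARGET_EINVAL in target_to_host_sock_type().
      */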
3137 
3138 /* do_socket() must return target values and target errnos. */
3139 static abi_long do_socket(int domain, int type, int protocol)
3140 {
3141     int target_type = type;
3142     int ret;
3143 
3144     ret = target_to_host_sock_type(&type);
3145     if (ret) {
3146         return ret;
3147     }
3148 
3149     if (domain == PF_NETLINK && !(
3150 #ifdef CONFIG_RTNETLINK
3151          protocol == NETLINK_ROUTE ||
3152 #endif
3153          protocol == NETLINK_KOBJECT_UEVENT ||
3154          protocol == NETLINK_AUDIT)) {
3155         return -TARGET_EPROTONOSUPPORT;
3156     }
3157 
3158     if (domain == AF_PACKET ||
3159         (domain == AF_INET && type == SOCK_PACKET)) {
3160         protocol = tswap16(protocol);
3161     }
3162 
3163     ret = get_errno(socket(domain, type, protocol));
3164     if (ret >= 0) {
3165         ret = sock_flags_fixup(ret, target_type);
3166         if (type == SOCK_PACKET) {
3167             /* Handle an obsolete case:
3168              * if the socket type is SOCK_PACKET, bind by name
3169              */
3170             fd_trans_register(ret, &target_packet_trans);
3171         } else if (domain == PF_NETLINK) {
3172             switch (protocol) {
3173 #ifdef CONFIG_RTNETLINK
3174             case NETLINK_ROUTE:
3175                 fd_trans_register(ret, &target_netlink_route_trans);
3176                 break;
3177 #endif
3178             case NETLINK_KOBJECT_UEVENT:
3179                 /* nothing to do: messages are strings */
3180                 break;
3181             case NETLINK_AUDIT:
3182                 fd_trans_register(ret, &target_netlink_audit_trans);
3183                 break;
3184             default:
3185                 g_assert_not_reached();
3186             }
3187         }
3188     }
3189     return ret;
3190 }
3191 
3192 /* do_bind() must return target values and target errnos. */
3193 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3194                         socklen_t addrlen)
3195 {
3196     void *addr;
3197     abi_long ret;
3198 
3199     if ((int)addrlen < 0) {
3200         return -TARGET_EINVAL;
3201     }
3202 
3203     addr = alloca(addrlen+1);
3204 
3205     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3206     if (ret)
3207         return ret;
3208 
3209     return get_errno(bind(sockfd, addr, addrlen));
3210 }
3211 
3212 /* do_connect() must return target values and target errnos. */
3213 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3214                            socklen_t addrlen)
3215 {
3216     void *addr;
3217     abi_long ret;
3218 
3219     if ((int)addrlen < 0) {
3220         return -TARGET_EINVAL;
3221     }
3222 
3223     addr = alloca(addrlen+1);
3224 
3225     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3226     if (ret)
3227         return ret;
3228 
3229     return get_errno(safe_connect(sockfd, addr, addrlen));
3230 }
3231 
3232 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3233 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3234                                       int flags, int send)
3235 {
3236     abi_long ret, len;
3237     struct msghdr msg;
3238     abi_ulong count;
3239     struct iovec *vec;
3240     abi_ulong target_vec;
3241 
3242     if (msgp->msg_name) {
3243         msg.msg_namelen = tswap32(msgp->msg_namelen);
3244         msg.msg_name = alloca(msg.msg_namelen+1);
3245         ret = target_to_host_sockaddr(fd, msg.msg_name,
3246                                       tswapal(msgp->msg_name),
3247                                       msg.msg_namelen);
3248         if (ret == -TARGET_EFAULT) {
3249             /* For connected sockets msg_name and msg_namelen must
3250              * be ignored, so returning EFAULT immediately is wrong.
3251              * Instead, pass a bad msg_name to the host kernel, and
3252              * let it decide whether to return EFAULT or not.
3253              */
3254             msg.msg_name = (void *)-1;
3255         } else if (ret) {
3256             goto out2;
3257         }
3258     } else {
3259         msg.msg_name = NULL;
3260         msg.msg_namelen = 0;
3261     }
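         /*
          * Editorial note: the host control buffer is sized at twice the
          * guest's msg_controllen, presumably to leave headroom for host cmsg
          * records whose alignment and header size can differ from the
          * target's.
          */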
3262     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3263     msg.msg_control = alloca(msg.msg_controllen);
3264     memset(msg.msg_control, 0, msg.msg_controllen);
3265 
3266     msg.msg_flags = tswap32(msgp->msg_flags);
3267 
3268     count = tswapal(msgp->msg_iovlen);
3269     target_vec = tswapal(msgp->msg_iov);
3270 
3271     if (count > IOV_MAX) {
3272         /* sendmsg/recvmsg return a different errno for this condition than
3273          * readv/writev do, so we must catch it here before lock_iovec() does.
3274          */
3275         ret = -TARGET_EMSGSIZE;
3276         goto out2;
3277     }
3278 
3279     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3280                      target_vec, count, send);
3281     if (vec == NULL) {
3282         ret = -host_to_target_errno(errno);
3283         goto out2;
3284     }
3285     msg.msg_iovlen = count;
3286     msg.msg_iov = vec;
3287 
3288     if (send) {
3289         if (fd_trans_target_to_host_data(fd)) {
3290             void *host_msg;
3291 
3292             host_msg = g_malloc(msg.msg_iov->iov_len);
3293             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3294             ret = fd_trans_target_to_host_data(fd)(host_msg,
3295                                                    msg.msg_iov->iov_len);
3296             if (ret >= 0) {
3297                 msg.msg_iov->iov_base = host_msg;
3298                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3299             }
3300             g_free(host_msg);
3301         } else {
3302             ret = target_to_host_cmsg(&msg, msgp);
3303             if (ret == 0) {
3304                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3305             }
3306         }
3307     } else {
3308         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3309         if (!is_error(ret)) {
3310             len = ret;
3311             if (fd_trans_host_to_target_data(fd)) {
3312                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3313                                                MIN(msg.msg_iov->iov_len, len));
3314             }
3315             if (!is_error(ret)) {
3316                 ret = host_to_target_cmsg(msgp, &msg);
3317             }
3318             if (!is_error(ret)) {
3319                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3320                 msgp->msg_flags = tswap32(msg.msg_flags);
3321                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3322                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3323                                     msg.msg_name, msg.msg_namelen);
3324                     if (ret) {
3325                         goto out;
3326                     }
3327                 }
3328 
3329                 ret = len;
3330             }
3331         }
3332     }
3333 
3334 out:
3335     unlock_iovec(vec, target_vec, count, !send);
3336 out2:
3337     return ret;
3338 }
3339 
3340 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3341                                int flags, int send)
3342 {
3343     abi_long ret;
3344     struct target_msghdr *msgp;
3345 
3346     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3347                           msgp,
3348                           target_msg,
3349                           send ? 1 : 0)) {
3350         return -TARGET_EFAULT;
3351     }
3352     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3353     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3354     return ret;
3355 }
3356 
3357 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3358  * so it might not have this *mmsg-specific flag either.
3359  */
3360 #ifndef MSG_WAITFORONE
3361 #define MSG_WAITFORONE 0x10000
3362 #endif
3363 
3364 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3365                                 unsigned int vlen, unsigned int flags,
3366                                 int send)
3367 {
3368     struct target_mmsghdr *mmsgp;
3369     abi_long ret = 0;
3370     int i;
3371 
3372     if (vlen > UIO_MAXIOV) {
3373         vlen = UIO_MAXIOV;
3374     }
3375 
3376     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3377     if (!mmsgp) {
3378         return -TARGET_EFAULT;
3379     }
3380 
3381     for (i = 0; i < vlen; i++) {
3382         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3383         if (is_error(ret)) {
3384             break;
3385         }
3386         mmsgp[i].msg_len = tswap32(ret);
3387         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3388         if (flags & MSG_WAITFORONE) {
3389             flags |= MSG_DONTWAIT;
3390         }
3391     }
3392 
3393     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3394 
3395     /* Return number of datagrams sent if we sent any at all;
3396      * otherwise return the error.
3397      */
3398     if (i) {
3399         return i;
3400     }
3401     return ret;
3402 }
3403 
3404 /* do_accept4() must return target values and target errnos. */
3405 static abi_long do_accept4(int fd, abi_ulong target_addr,
3406                            abi_ulong target_addrlen_addr, int flags)
3407 {
3408     socklen_t addrlen, ret_addrlen;
3409     void *addr;
3410     abi_long ret;
3411     int host_flags;
3412 
3413     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3414 
3415     if (target_addr == 0) {
3416         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3417     }
3418 
3419     /* Linux returns EFAULT if the addrlen pointer is invalid */
3420     if (get_user_u32(addrlen, target_addrlen_addr))
3421         return -TARGET_EFAULT;
3422 
3423     if ((int)addrlen < 0) {
3424         return -TARGET_EINVAL;
3425     }
3426 
3427     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3428         return -TARGET_EFAULT;
3429     }
3430 
3431     addr = alloca(addrlen);
3432 
3433     ret_addrlen = addrlen;
3434     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3435     if (!is_error(ret)) {
3436         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3437         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3438             ret = -TARGET_EFAULT;
3439         }
3440     }
3441     return ret;
3442 }
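
     /*
      * Editorial note: do_getpeername() and do_getsockname() below follow the
      * same pattern as do_accept4() above: read the guest's addrlen, reject
      * negative values, check that the guest buffer is writable, call the
      * host syscall with a scratch sockaddr, then copy back
      * MIN(addrlen, ret_addrlen) bytes and the updated length; do_recvfrom()
      * handles its optional address argument similarly.
      */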
3443 
3444 /* do_getpeername() must return target values and target errnos. */
3445 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3446                                abi_ulong target_addrlen_addr)
3447 {
3448     socklen_t addrlen, ret_addrlen;
3449     void *addr;
3450     abi_long ret;
3451 
3452     if (get_user_u32(addrlen, target_addrlen_addr))
3453         return -TARGET_EFAULT;
3454 
3455     if ((int)addrlen < 0) {
3456         return -TARGET_EINVAL;
3457     }
3458 
3459     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3460         return -TARGET_EFAULT;
3461     }
3462 
3463     addr = alloca(addrlen);
3464 
3465     ret_addrlen = addrlen;
3466     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3467     if (!is_error(ret)) {
3468         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3469         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3470             ret = -TARGET_EFAULT;
3471         }
3472     }
3473     return ret;
3474 }
3475 
3476 /* do_getsockname() must return target values and target errnos. */
3477 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3478                                abi_ulong target_addrlen_addr)
3479 {
3480     socklen_t addrlen, ret_addrlen;
3481     void *addr;
3482     abi_long ret;
3483 
3484     if (get_user_u32(addrlen, target_addrlen_addr))
3485         return -TARGET_EFAULT;
3486 
3487     if ((int)addrlen < 0) {
3488         return -TARGET_EINVAL;
3489     }
3490 
3491     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3492         return -TARGET_EFAULT;
3493     }
3494 
3495     addr = alloca(addrlen);
3496 
3497     ret_addrlen = addrlen;
3498     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3499     if (!is_error(ret)) {
3500         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3501         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3502             ret = -TARGET_EFAULT;
3503         }
3504     }
3505     return ret;
3506 }
3507 
3508 /* do_socketpair() must return target values and target errnos. */
3509 static abi_long do_socketpair(int domain, int type, int protocol,
3510                               abi_ulong target_tab_addr)
3511 {
3512     int tab[2];
3513     abi_long ret;
3514 
3515     target_to_host_sock_type(&type);
3516 
3517     ret = get_errno(socketpair(domain, type, protocol, tab));
3518     if (!is_error(ret)) {
3519         if (put_user_s32(tab[0], target_tab_addr)
3520             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3521             ret = -TARGET_EFAULT;
3522     }
3523     return ret;
3524 }
3525 
3526 /* do_sendto() must return target values and target errnos. */
3527 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3528                           abi_ulong target_addr, socklen_t addrlen)
3529 {
3530     void *addr;
3531     void *host_msg;
3532     void *copy_msg = NULL;
3533     abi_long ret;
3534 
3535     if ((int)addrlen < 0) {
3536         return -TARGET_EINVAL;
3537     }
3538 
3539     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3540     if (!host_msg)
3541         return -TARGET_EFAULT;
3542     if (fd_trans_target_to_host_data(fd)) {
3543         copy_msg = host_msg;
3544         host_msg = g_malloc(len);
3545         memcpy(host_msg, copy_msg, len);
3546         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3547         if (ret < 0) {
3548             goto fail;
3549         }
3550     }
3551     if (target_addr) {
3552         addr = alloca(addrlen+1);
3553         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3554         if (ret) {
3555             goto fail;
3556         }
3557         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3558     } else {
3559         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3560     }
3561 fail:
3562     if (copy_msg) {
3563         g_free(host_msg);
3564         host_msg = copy_msg;
3565     }
3566     unlock_user(host_msg, msg, 0);
3567     return ret;
3568 }
3569 
3570 /* do_recvfrom() must return target values and target errnos. */
3571 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3572                             abi_ulong target_addr,
3573                             abi_ulong target_addrlen)
3574 {
3575     socklen_t addrlen, ret_addrlen;
3576     void *addr;
3577     void *host_msg;
3578     abi_long ret;
3579 
3580     if (!msg) {
3581         host_msg = NULL;
3582     } else {
3583         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3584         if (!host_msg) {
3585             return -TARGET_EFAULT;
3586         }
3587     }
3588     if (target_addr) {
3589         if (get_user_u32(addrlen, target_addrlen)) {
3590             ret = -TARGET_EFAULT;
3591             goto fail;
3592         }
3593         if ((int)addrlen < 0) {
3594             ret = -TARGET_EINVAL;
3595             goto fail;
3596         }
3597         addr = alloca(addrlen);
3598         ret_addrlen = addrlen;
3599         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3600                                       addr, &ret_addrlen));
3601     } else {
3602         addr = NULL; /* To keep compiler quiet.  */
3603         addrlen = 0; /* To keep compiler quiet.  */
3604         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3605     }
3606     if (!is_error(ret)) {
3607         if (fd_trans_host_to_target_data(fd)) {
3608             abi_long trans;
3609             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3610             if (is_error(trans)) {
3611                 ret = trans;
3612                 goto fail;
3613             }
3614         }
3615         if (target_addr) {
3616             host_to_target_sockaddr(target_addr, addr,
3617                                     MIN(addrlen, ret_addrlen));
3618             if (put_user_u32(ret_addrlen, target_addrlen)) {
3619                 ret = -TARGET_EFAULT;
3620                 goto fail;
3621             }
3622         }
3623         unlock_user(host_msg, msg, len);
3624     } else {
3625 fail:
3626         unlock_user(host_msg, msg, 0);
3627     }
3628     return ret;
3629 }
3630 
3631 #ifdef TARGET_NR_socketcall
3632 /* do_socketcall() must return target values and target errnos. */
3633 static abi_long do_socketcall(int num, abi_ulong vptr)
3634 {
3635     static const unsigned nargs[] = { /* number of arguments per operation */
3636         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3637         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3638         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3639         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3640         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3641         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3642         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3643         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3644         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3645         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3646         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3647         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3648         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3649         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3650         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3651         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3652         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3653         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3654         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3655         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3656     };
3657     abi_long a[6]; /* max 6 args */
3658     unsigned i;
3659 
3660     /* check the range of the first argument num */
3661     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3662     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3663         return -TARGET_EINVAL;
3664     }
3665     /* ensure we have space for args */
3666     if (nargs[num] > ARRAY_SIZE(a)) {
3667         return -TARGET_EINVAL;
3668     }
3669     /* collect the arguments in a[] according to nargs[] */
3670     for (i = 0; i < nargs[num]; ++i) {
3671         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3672             return -TARGET_EFAULT;
3673         }
3674     }
3675     /* now when we have the args, invoke the appropriate underlying function */
3676     switch (num) {
3677     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3678         return do_socket(a[0], a[1], a[2]);
3679     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3680         return do_bind(a[0], a[1], a[2]);
3681     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3682         return do_connect(a[0], a[1], a[2]);
3683     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3684         return get_errno(listen(a[0], a[1]));
3685     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3686         return do_accept4(a[0], a[1], a[2], 0);
3687     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3688         return do_getsockname(a[0], a[1], a[2]);
3689     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3690         return do_getpeername(a[0], a[1], a[2]);
3691     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3692         return do_socketpair(a[0], a[1], a[2], a[3]);
3693     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3694         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3695     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3696         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3697     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3698         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3699     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3700         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3701     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3702         return get_errno(shutdown(a[0], a[1]));
3703     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3704         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3705     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3706         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3707     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3708         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3709     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3710         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3711     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3712         return do_accept4(a[0], a[1], a[2], a[3]);
3713     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3714         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3715     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3716         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3717     default:
3718         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3719         return -TARGET_EINVAL;
3720     }
3721 }
3722 #endif
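
     /*
      * Illustrative example (not part of the original source): a guest
      * socketcall(TARGET_SYS_SOCKET, args) with args pointing at
      * { AF_INET, SOCK_STREAM, 0 } is handled above by fetching
      * nargs[TARGET_SYS_SOCKET] == 3 abi_long values from guest memory with
      * get_user_ual() and dispatching do_socket(AF_INET, SOCK_STREAM, 0).
      */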
3723 
3724 #define N_SHM_REGIONS	32
3725 
3726 static struct shm_region {
3727     abi_ulong start;
3728     abi_ulong size;
3729     bool in_use;
3730 } shm_regions[N_SHM_REGIONS];
3731 
3732 #ifndef TARGET_SEMID64_DS
3733 /* asm-generic version of this struct */
3734 struct target_semid64_ds
3735 {
3736   struct target_ipc_perm sem_perm;
3737   abi_ulong sem_otime;
3738 #if TARGET_ABI_BITS == 32
3739   abi_ulong __unused1;
3740 #endif
3741   abi_ulong sem_ctime;
3742 #if TARGET_ABI_BITS == 32
3743   abi_ulong __unused2;
3744 #endif
3745   abi_ulong sem_nsems;
3746   abi_ulong __unused3;
3747   abi_ulong __unused4;
3748 };
3749 #endif
3750 
3751 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3752                                                abi_ulong target_addr)
3753 {
3754     struct target_ipc_perm *target_ip;
3755     struct target_semid64_ds *target_sd;
3756 
3757     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3758         return -TARGET_EFAULT;
3759     target_ip = &(target_sd->sem_perm);
3760     host_ip->__key = tswap32(target_ip->__key);
3761     host_ip->uid = tswap32(target_ip->uid);
3762     host_ip->gid = tswap32(target_ip->gid);
3763     host_ip->cuid = tswap32(target_ip->cuid);
3764     host_ip->cgid = tswap32(target_ip->cgid);
3765 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3766     host_ip->mode = tswap32(target_ip->mode);
3767 #else
3768     host_ip->mode = tswap16(target_ip->mode);
3769 #endif
3770 #if defined(TARGET_PPC)
3771     host_ip->__seq = tswap32(target_ip->__seq);
3772 #else
3773     host_ip->__seq = tswap16(target_ip->__seq);
3774 #endif
3775     unlock_user_struct(target_sd, target_addr, 0);
3776     return 0;
3777 }
3778 
3779 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3780                                                struct ipc_perm *host_ip)
3781 {
3782     struct target_ipc_perm *target_ip;
3783     struct target_semid64_ds *target_sd;
3784 
3785     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3786         return -TARGET_EFAULT;
3787     target_ip = &(target_sd->sem_perm);
3788     target_ip->__key = tswap32(host_ip->__key);
3789     target_ip->uid = tswap32(host_ip->uid);
3790     target_ip->gid = tswap32(host_ip->gid);
3791     target_ip->cuid = tswap32(host_ip->cuid);
3792     target_ip->cgid = tswap32(host_ip->cgid);
3793 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3794     target_ip->mode = tswap32(host_ip->mode);
3795 #else
3796     target_ip->mode = tswap16(host_ip->mode);
3797 #endif
3798 #if defined(TARGET_PPC)
3799     target_ip->__seq = tswap32(host_ip->__seq);
3800 #else
3801     target_ip->__seq = tswap16(host_ip->__seq);
3802 #endif
3803     unlock_user_struct(target_sd, target_addr, 1);
3804     return 0;
3805 }
3806 
3807 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3808                                                abi_ulong target_addr)
3809 {
3810     struct target_semid64_ds *target_sd;
3811 
3812     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3813         return -TARGET_EFAULT;
3814     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3815         return -TARGET_EFAULT;
3816     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3817     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3818     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3819     unlock_user_struct(target_sd, target_addr, 0);
3820     return 0;
3821 }
3822 
3823 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3824                                                struct semid_ds *host_sd)
3825 {
3826     struct target_semid64_ds *target_sd;
3827 
3828     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3829         return -TARGET_EFAULT;
3830     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3831         return -TARGET_EFAULT;
3832     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3833     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3834     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3835     unlock_user_struct(target_sd, target_addr, 1);
3836     return 0;
3837 }
3838 
3839 struct target_seminfo {
3840     int semmap;
3841     int semmni;
3842     int semmns;
3843     int semmnu;
3844     int semmsl;
3845     int semopm;
3846     int semume;
3847     int semusz;
3848     int semvmx;
3849     int semaem;
3850 };
3851 
3852 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3853                                               struct seminfo *host_seminfo)
3854 {
3855     struct target_seminfo *target_seminfo;
3856     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3857         return -TARGET_EFAULT;
3858     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3859     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3860     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3861     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3862     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3863     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3864     __put_user(host_seminfo->semume, &target_seminfo->semume);
3865     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3866     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3867     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3868     unlock_user_struct(target_seminfo, target_addr, 1);
3869     return 0;
3870 }
3871 
3872 union semun {
3873     int val;
3874     struct semid_ds *buf;
3875     unsigned short *array;
3876     struct seminfo *__buf;
3877 };
3878 
3879 union target_semun {
3880     int val;
3881     abi_ulong buf;
3882     abi_ulong array;
3883     abi_ulong __buf;
3884 };
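
     /*
      * Editorial note: the guest passes semctl()'s semun argument by value,
      * so do_semctl() below receives it as a single abi_ulong and reinterprets
      * it through union target_semun according to the command: an integer for
      * GETVAL/SETVAL, or a guest pointer (buf/array/__buf) for the commands
      * that take a structure.
      */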
3885 
3886 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3887                                                abi_ulong target_addr)
3888 {
3889     int nsems;
3890     unsigned short *array;
3891     union semun semun;
3892     struct semid_ds semid_ds;
3893     int i, ret;
3894 
3895     semun.buf = &semid_ds;
3896 
3897     ret = semctl(semid, 0, IPC_STAT, semun);
3898     if (ret == -1)
3899         return get_errno(ret);
3900 
3901     nsems = semid_ds.sem_nsems;
3902 
3903     *host_array = g_try_new(unsigned short, nsems);
3904     if (!*host_array) {
3905         return -TARGET_ENOMEM;
3906     }
3907     array = lock_user(VERIFY_READ, target_addr,
3908                       nsems*sizeof(unsigned short), 1);
3909     if (!array) {
3910         g_free(*host_array);
3911         return -TARGET_EFAULT;
3912     }
3913 
3914     for (i = 0; i < nsems; i++) {
3915         __get_user((*host_array)[i], &array[i]);
3916     }
3917     unlock_user(array, target_addr, 0);
3918 
3919     return 0;
3920 }
3921 
3922 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3923                                                unsigned short **host_array)
3924 {
3925     int nsems;
3926     unsigned short *array;
3927     union semun semun;
3928     struct semid_ds semid_ds;
3929     int i, ret;
3930 
3931     semun.buf = &semid_ds;
3932 
3933     ret = semctl(semid, 0, IPC_STAT, semun);
3934     if (ret == -1)
3935         return get_errno(ret);
3936 
3937     nsems = semid_ds.sem_nsems;
3938 
3939     array = lock_user(VERIFY_WRITE, target_addr,
3940                       nsems*sizeof(unsigned short), 0);
3941     if (!array)
3942         return -TARGET_EFAULT;
3943 
3944     for (i = 0; i < nsems; i++) {
3945         __put_user((*host_array)[i], &array[i]);
3946     }
3947     g_free(*host_array);
3948     unlock_user(array, target_addr, 1);
3949 
3950     return 0;
3951 }
3952 
3953 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3954                                  abi_ulong target_arg)
3955 {
3956     union target_semun target_su = { .buf = target_arg };
3957     union semun arg;
3958     struct semid_ds dsarg;
3959     unsigned short *array = NULL;
3960     struct seminfo seminfo;
3961     abi_long ret = -TARGET_EINVAL;
3962     abi_long err;
3963     cmd &= 0xff;
3964 
3965     switch (cmd) {
3966     case GETVAL:
3967     case SETVAL:
3968         /* In 64 bit cross-endian situations, we will erroneously pick up
3969          * the wrong half of the union for the "val" element.  To rectify
3970          * this, the entire 8-byte structure is byteswapped, followed by
3971          * a swap of the 4 byte val field. In other cases, the data is
3972          * already in proper host byte order. */
3973         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3974             target_su.buf = tswapal(target_su.buf);
3975             arg.val = tswap32(target_su.val);
3976         } else {
3977             arg.val = target_su.val;
3978         }
3979         ret = get_errno(semctl(semid, semnum, cmd, arg));
3980         break;
3981     case GETALL:
3982     case SETALL:
3983         err = target_to_host_semarray(semid, &array, target_su.array);
3984         if (err)
3985             return err;
3986         arg.array = array;
3987         ret = get_errno(semctl(semid, semnum, cmd, arg));
3988         err = host_to_target_semarray(semid, target_su.array, &array);
3989         if (err)
3990             return err;
3991         break;
3992     case IPC_STAT:
3993     case IPC_SET:
3994     case SEM_STAT:
3995         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3996         if (err)
3997             return err;
3998         arg.buf = &dsarg;
3999         ret = get_errno(semctl(semid, semnum, cmd, arg));
4000         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4001         if (err)
4002             return err;
4003         break;
4004     case IPC_INFO:
4005     case SEM_INFO:
4006         arg.__buf = &seminfo;
4007         ret = get_errno(semctl(semid, semnum, cmd, arg));
4008         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4009         if (err)
4010             return err;
4011         break;
4012     case IPC_RMID:
4013     case GETPID:
4014     case GETNCNT:
4015     case GETZCNT:
4016         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4017         break;
4018     }
4019 
4020     return ret;
4021 }
4022 
4023 struct target_sembuf {
4024     unsigned short sem_num;
4025     short sem_op;
4026     short sem_flg;
4027 };
4028 
4029 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4030                                              abi_ulong target_addr,
4031                                              unsigned nsops)
4032 {
4033     struct target_sembuf *target_sembuf;
4034     int i;
4035 
4036     target_sembuf = lock_user(VERIFY_READ, target_addr,
4037                               nsops*sizeof(struct target_sembuf), 1);
4038     if (!target_sembuf)
4039         return -TARGET_EFAULT;
4040 
4041     for (i = 0; i < nsops; i++) {
4042         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4043         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4044         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4045     }
4046 
4047     unlock_user(target_sembuf, target_addr, 0);
4048 
4049     return 0;
4050 }
4051 
4052 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4053     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4054 
4055 /*
4056  * This macro is required to handle the s390 variants, which pass the
4057  * arguments in a different order than the default.
4058  */
4059 #ifdef __s390x__
4060 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4061   (__nsops), (__timeout), (__sops)
4062 #else
4063 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4064   (__nsops), 0, (__sops), (__timeout)
4065 #endif
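
     /*
      * Illustrative expansion (not part of the original source): in the
      * safe_ipc() fallback below, SEMTIMEDOP_IPC_ARGS(nsops, sops, timeout)
      * expands to "nsops, 0, sops, timeout" on most hosts, matching the
      * six-argument generic sys_ipc() layout, and to "nsops, timeout, sops"
      * on s390x, whose sys_ipc() takes only five parameters.
      */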
4066 
4067 static inline abi_long do_semtimedop(int semid,
4068                                      abi_long ptr,
4069                                      unsigned nsops,
4070                                      abi_long timeout, bool time64)
4071 {
4072     struct sembuf *sops;
4073     struct timespec ts, *pts = NULL;
4074     abi_long ret;
4075 
4076     if (timeout) {
4077         pts = &ts;
4078         if (time64) {
4079             if (target_to_host_timespec64(pts, timeout)) {
4080                 return -TARGET_EFAULT;
4081             }
4082         } else {
4083             if (target_to_host_timespec(pts, timeout)) {
4084                 return -TARGET_EFAULT;
4085             }
4086         }
4087     }
4088 
4089     if (nsops > TARGET_SEMOPM) {
4090         return -TARGET_E2BIG;
4091     }
4092 
4093     sops = g_new(struct sembuf, nsops);
4094 
4095     if (target_to_host_sembuf(sops, ptr, nsops)) {
4096         g_free(sops);
4097         return -TARGET_EFAULT;
4098     }
4099 
4100     ret = -TARGET_ENOSYS;
4101 #ifdef __NR_semtimedop
4102     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4103 #endif
4104 #ifdef __NR_ipc
4105     if (ret == -TARGET_ENOSYS) {
4106         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4107                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4108     }
4109 #endif
4110     g_free(sops);
4111     return ret;
4112 }
4113 #endif
4114 
4115 struct target_msqid_ds
4116 {
4117     struct target_ipc_perm msg_perm;
4118     abi_ulong msg_stime;
4119 #if TARGET_ABI_BITS == 32
4120     abi_ulong __unused1;
4121 #endif
4122     abi_ulong msg_rtime;
4123 #if TARGET_ABI_BITS == 32
4124     abi_ulong __unused2;
4125 #endif
4126     abi_ulong msg_ctime;
4127 #if TARGET_ABI_BITS == 32
4128     abi_ulong __unused3;
4129 #endif
4130     abi_ulong __msg_cbytes;
4131     abi_ulong msg_qnum;
4132     abi_ulong msg_qbytes;
4133     abi_ulong msg_lspid;
4134     abi_ulong msg_lrpid;
4135     abi_ulong __unused4;
4136     abi_ulong __unused5;
4137 };
4138 
4139 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4140                                                abi_ulong target_addr)
4141 {
4142     struct target_msqid_ds *target_md;
4143 
4144     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4145         return -TARGET_EFAULT;
4146     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4147         return -TARGET_EFAULT;
4148     host_md->msg_stime = tswapal(target_md->msg_stime);
4149     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4150     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4151     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4152     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4153     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4154     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4155     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4156     unlock_user_struct(target_md, target_addr, 0);
4157     return 0;
4158 }
4159 
4160 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4161                                                struct msqid_ds *host_md)
4162 {
4163     struct target_msqid_ds *target_md;
4164 
4165     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4166         return -TARGET_EFAULT;
4167     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4168         return -TARGET_EFAULT;
4169     target_md->msg_stime = tswapal(host_md->msg_stime);
4170     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4171     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4172     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4173     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4174     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4175     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4176     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4177     unlock_user_struct(target_md, target_addr, 1);
4178     return 0;
4179 }
4180 
4181 struct target_msginfo {
4182     int msgpool;
4183     int msgmap;
4184     int msgmax;
4185     int msgmnb;
4186     int msgmni;
4187     int msgssz;
4188     int msgtql;
4189     unsigned short int msgseg;
4190 };
4191 
4192 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4193                                               struct msginfo *host_msginfo)
4194 {
4195     struct target_msginfo *target_msginfo;
4196     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4197         return -TARGET_EFAULT;
4198     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4199     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4200     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4201     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4202     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4203     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4204     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4205     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4206     unlock_user_struct(target_msginfo, target_addr, 1);
4207     return 0;
4208 }
4209 
4210 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4211 {
4212     struct msqid_ds dsarg;
4213     struct msginfo msginfo;
4214     abi_long ret = -TARGET_EINVAL;
4215 
4216     cmd &= 0xff;
4217 
4218     switch (cmd) {
4219     case IPC_STAT:
4220     case IPC_SET:
4221     case MSG_STAT:
4222         if (target_to_host_msqid_ds(&dsarg,ptr))
4223             return -TARGET_EFAULT;
4224         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4225         if (host_to_target_msqid_ds(ptr,&dsarg))
4226             return -TARGET_EFAULT;
4227         break;
4228     case IPC_RMID:
4229         ret = get_errno(msgctl(msgid, cmd, NULL));
4230         break;
4231     case IPC_INFO:
4232     case MSG_INFO:
4233         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4234         if (host_to_target_msginfo(ptr, &msginfo))
4235             return -TARGET_EFAULT;
4236         break;
4237     }
4238 
4239     return ret;
4240 }
4241 
4242 struct target_msgbuf {
4243     abi_long mtype;
4244     char mtext[1];
4245 };
4246 
4247 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4248                                  ssize_t msgsz, int msgflg)
4249 {
4250     struct target_msgbuf *target_mb;
4251     struct msgbuf *host_mb;
4252     abi_long ret = 0;
4253 
4254     if (msgsz < 0) {
4255         return -TARGET_EINVAL;
4256     }
4257 
4258     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4259         return -TARGET_EFAULT;
4260     host_mb = g_try_malloc(msgsz + sizeof(long));
4261     if (!host_mb) {
4262         unlock_user_struct(target_mb, msgp, 0);
4263         return -TARGET_ENOMEM;
4264     }
4265     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4266     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4267     ret = -TARGET_ENOSYS;
4268 #ifdef __NR_msgsnd
4269     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4270 #endif
4271 #ifdef __NR_ipc
4272     if (ret == -TARGET_ENOSYS) {
4273 #ifdef __s390x__
4274         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4275                                  host_mb));
4276 #else
4277         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4278                                  host_mb, 0));
4279 #endif
4280     }
4281 #endif
4282     g_free(host_mb);
4283     unlock_user_struct(target_mb, msgp, 0);
4284 
4285     return ret;
4286 }
4287 
4288 #ifdef __NR_ipc
4289 #if defined(__sparc__)
4290 /* On SPARC, msgrcv does not use the kludge for the final 2 arguments.  */
4291 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4292 #elif defined(__s390x__)
4293 /* The s390 sys_ipc variant has only five parameters.  */
4294 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4295     ((long int[]){(long int)__msgp, __msgtyp})
4296 #else
4297 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4298     ((long int[]){(long int)__msgp, __msgtyp}), 0
4299 #endif
4300 #endif
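
     /*
      * Editorial note: MSGRCV_ARGS() supplies the trailing sys_ipc()
      * arguments for the msgrcv fallback below.  Most hosts pack msgp and
      * msgtyp into a temporary two-element array (the "kludge") followed by
      * a trailing 0; s390x passes the same array without the trailing
      * argument, since its sys_ipc() has only five parameters; SPARC passes
      * msgp and msgtyp directly.
      */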
4301 
4302 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4303                                  ssize_t msgsz, abi_long msgtyp,
4304                                  int msgflg)
4305 {
4306     struct target_msgbuf *target_mb;
4307     char *target_mtext;
4308     struct msgbuf *host_mb;
4309     abi_long ret = 0;
4310 
4311     if (msgsz < 0) {
4312         return -TARGET_EINVAL;
4313     }
4314 
4315     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4316         return -TARGET_EFAULT;
4317 
4318     host_mb = g_try_malloc(msgsz + sizeof(long));
4319     if (!host_mb) {
4320         ret = -TARGET_ENOMEM;
4321         goto end;
4322     }
4323     ret = -TARGET_ENOSYS;
4324 #ifdef __NR_msgrcv
4325     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4326 #endif
4327 #ifdef __NR_ipc
4328     if (ret == -TARGET_ENOSYS) {
4329         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4330                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4331     }
4332 #endif
4333 
4334     if (ret > 0) {
4335         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4336         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4337         if (!target_mtext) {
4338             ret = -TARGET_EFAULT;
4339             goto end;
4340         }
4341         memcpy(target_mb->mtext, host_mb->mtext, ret);
4342         unlock_user(target_mtext, target_mtext_addr, ret);
4343     }
4344 
4345     target_mb->mtype = tswapal(host_mb->mtype);
4346 
4347 end:
4348     if (target_mb)
4349         unlock_user_struct(target_mb, msgp, 1);
4350     g_free(host_mb);
4351     return ret;
4352 }
4353 
4354 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4355                                                abi_ulong target_addr)
4356 {
4357     struct target_shmid_ds *target_sd;
4358 
4359     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4360         return -TARGET_EFAULT;
4361     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4362         return -TARGET_EFAULT;
4363     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4364     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4365     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4366     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4367     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4368     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4369     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4370     unlock_user_struct(target_sd, target_addr, 0);
4371     return 0;
4372 }
4373 
4374 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4375                                                struct shmid_ds *host_sd)
4376 {
4377     struct target_shmid_ds *target_sd;
4378 
4379     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4380         return -TARGET_EFAULT;
4381     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4382         return -TARGET_EFAULT;
4383     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4384     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4385     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4386     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4387     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4388     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4389     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4390     unlock_user_struct(target_sd, target_addr, 1);
4391     return 0;
4392 }
4393 
4394 struct  target_shminfo {
4395     abi_ulong shmmax;
4396     abi_ulong shmmin;
4397     abi_ulong shmmni;
4398     abi_ulong shmseg;
4399     abi_ulong shmall;
4400 };
4401 
4402 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4403                                               struct shminfo *host_shminfo)
4404 {
4405     struct target_shminfo *target_shminfo;
4406     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4407         return -TARGET_EFAULT;
4408     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4409     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4410     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4411     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4412     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4413     unlock_user_struct(target_shminfo, target_addr, 1);
4414     return 0;
4415 }
4416 
4417 struct target_shm_info {
4418     int used_ids;
4419     abi_ulong shm_tot;
4420     abi_ulong shm_rss;
4421     abi_ulong shm_swp;
4422     abi_ulong swap_attempts;
4423     abi_ulong swap_successes;
4424 };
4425 
4426 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4427                                                struct shm_info *host_shm_info)
4428 {
4429     struct target_shm_info *target_shm_info;
4430     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4431         return -TARGET_EFAULT;
4432     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4433     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4434     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4435     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4436     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4437     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4438     unlock_user_struct(target_shm_info, target_addr, 1);
4439     return 0;
4440 }
4441 
4442 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4443 {
4444     struct shmid_ds dsarg;
4445     struct shminfo shminfo;
4446     struct shm_info shm_info;
4447     abi_long ret = -TARGET_EINVAL;
4448 
4449     cmd &= 0xff;
4450 
4451     switch (cmd) {
4452     case IPC_STAT:
4453     case IPC_SET:
4454     case SHM_STAT:
4455         if (target_to_host_shmid_ds(&dsarg, buf))
4456             return -TARGET_EFAULT;
4457         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4458         if (host_to_target_shmid_ds(buf, &dsarg))
4459             return -TARGET_EFAULT;
4460         break;
4461     case IPC_INFO:
4462         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4463         if (host_to_target_shminfo(buf, &shminfo))
4464             return -TARGET_EFAULT;
4465         break;
4466     case SHM_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4468         if (host_to_target_shm_info(buf, &shm_info))
4469             return -TARGET_EFAULT;
4470         break;
4471     case IPC_RMID:
4472     case SHM_LOCK:
4473     case SHM_UNLOCK:
4474         ret = get_errno(shmctl(shmid, cmd, NULL));
4475         break;
4476     }
4477 
4478     return ret;
4479 }
4480 
4481 #ifndef TARGET_FORCE_SHMLBA
4482 /* For most architectures, SHMLBA is the same as the page size;
4483  * some architectures have larger values, in which case they should
4484  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4485  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4486  * and defining its own value for SHMLBA.
4487  *
4488  * The kernel also permits SHMLBA to be set by the architecture to a
4489  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4490  * this means that addresses are rounded to the large size if
4491  * SHM_RND is set but addresses not aligned to that size are not rejected
4492  * as long as they are at least page-aligned. Since the only architecture
4493  * which uses this is ia64, this code doesn't provide for that oddity.
4494  */
4495 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4496 {
4497     return TARGET_PAGE_SIZE;
4498 }
4499 #endif
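
     /*
      * Illustrative example (not part of the original source): with
      * shmlba == 0x4000, an shmat() request at shmaddr == 0x12345 is rounded
      * down to 0x10000 by do_shmat() below when SHM_RND is set, and rejected
      * with -TARGET_EINVAL otherwise.
      */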
4500 
4501 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4502                                  int shmid, abi_ulong shmaddr, int shmflg)
4503 {
4504     CPUState *cpu = env_cpu(cpu_env);
4505     abi_long raddr;
4506     void *host_raddr;
4507     struct shmid_ds shm_info;
4508     int i, ret;
4509     abi_ulong shmlba;
4510 
4511     /* shmat pointers are always untagged */
4512 
4513     /* find out the length of the shared memory segment */
4514     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4515     if (is_error(ret)) {
4516         /* can't get length, bail out */
4517         return ret;
4518     }
4519 
4520     shmlba = target_shmlba(cpu_env);
4521 
4522     if (shmaddr & (shmlba - 1)) {
4523         if (shmflg & SHM_RND) {
4524             shmaddr &= ~(shmlba - 1);
4525         } else {
4526             return -TARGET_EINVAL;
4527         }
4528     }
4529     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4530         return -TARGET_EINVAL;
4531     }
4532 
4533     mmap_lock();
4534 
4535     /*
4536      * We're mapping shared memory, so ensure we generate code for parallel
4537      * execution and flush old translations.  This will work up to the level
4538      * supported by the host -- anything that requires EXCP_ATOMIC will not
4539      * be atomic with respect to an external process.
4540      */
4541     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4542         cpu->tcg_cflags |= CF_PARALLEL;
4543         tb_flush(cpu);
4544     }
4545 
4546     if (shmaddr)
4547         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4548     else {
4549         abi_ulong mmap_start;
4550 
4551         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4552         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4553 
4554         if (mmap_start == -1) {
4555             errno = ENOMEM;
4556             host_raddr = (void *)-1;
4557         } else
4558             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4559                                shmflg | SHM_REMAP);
4560     }
4561 
4562     if (host_raddr == (void *)-1) {
4563         mmap_unlock();
4564         return get_errno((long)host_raddr);
4565     }
4566     raddr = h2g((unsigned long)host_raddr);
4567 
4568     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4569                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4570                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4571 
4572     for (i = 0; i < N_SHM_REGIONS; i++) {
4573         if (!shm_regions[i].in_use) {
4574             shm_regions[i].in_use = true;
4575             shm_regions[i].start = raddr;
4576             shm_regions[i].size = shm_info.shm_segsz;
4577             break;
4578         }
4579     }
4580 
4581     mmap_unlock();
4582     return raddr;
4583 
4584 }
4585 
4586 static inline abi_long do_shmdt(abi_ulong shmaddr)
4587 {
4588     int i;
4589     abi_long rv;
4590 
4591     /* shmdt pointers are always untagged */
4592 
4593     mmap_lock();
4594 
4595     for (i = 0; i < N_SHM_REGIONS; ++i) {
4596         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4597             shm_regions[i].in_use = false;
4598             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4599             break;
4600         }
4601     }
4602     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4603 
4604     mmap_unlock();
4605 
4606     return rv;
4607 }
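
     /*
      * Editorial note: do_shmat() records each attached segment in a free
      * slot of shm_regions[] so that do_shmdt() can clear the guest page
      * flags for the right length on detach; if all N_SHM_REGIONS slots are
      * in use the attach still succeeds, but a later detach will not find
      * the region and leaves the page flags unchanged.
      */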
4608 
4609 #ifdef TARGET_NR_ipc
4610 /* ??? This only works with linear mappings.  */
4611 /* do_ipc() must return target values and target errnos. */
4612 static abi_long do_ipc(CPUArchState *cpu_env,
4613                        unsigned int call, abi_long first,
4614                        abi_long second, abi_long third,
4615                        abi_long ptr, abi_long fifth)
4616 {
4617     int version;
4618     abi_long ret = 0;
4619 
4620     version = call >> 16;
4621     call &= 0xffff;
4622 
4623     switch (call) {
4624     case IPCOP_semop:
4625         ret = do_semtimedop(first, ptr, second, 0, false);
4626         break;
4627     case IPCOP_semtimedop:
4628     /*
4629      * The s390 sys_ipc variant has only five parameters instead of six
4630      * (as in the default variant); the only difference is the handling of
4631      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4632      * to a struct timespec while the generic variant uses the fifth parameter.
4633      */
4634 #if defined(TARGET_S390X)
4635         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4636 #else
4637         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4638 #endif
4639         break;
4640 
4641     case IPCOP_semget:
4642         ret = get_errno(semget(first, second, third));
4643         break;
4644 
4645     case IPCOP_semctl: {
4646         /* The semun argument to semctl is passed by value, so dereference the
4647          * ptr argument. */
4648         abi_ulong atptr;
4649         get_user_ual(atptr, ptr);
4650         ret = do_semctl(first, second, third, atptr);
4651         break;
4652     }
4653 
4654     case IPCOP_msgget:
4655         ret = get_errno(msgget(first, second));
4656         break;
4657 
4658     case IPCOP_msgsnd:
4659         ret = do_msgsnd(first, ptr, second, third);
4660         break;
4661 
4662     case IPCOP_msgctl:
4663         ret = do_msgctl(first, second, ptr);
4664         break;
4665 
4666     case IPCOP_msgrcv:
4667         switch (version) {
4668         case 0:
4669             {
4670                 struct target_ipc_kludge {
4671                     abi_long msgp;
4672                     abi_long msgtyp;
4673                 } *tmp;
4674 
4675                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4676                     ret = -TARGET_EFAULT;
4677                     break;
4678                 }
4679 
4680                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4681 
4682                 unlock_user_struct(tmp, ptr, 0);
4683                 break;
4684             }
4685         default:
4686             ret = do_msgrcv(first, ptr, second, fifth, third);
4687         }
4688         break;
4689 
4690     case IPCOP_shmat:
4691         switch (version) {
4692         default:
4693         {
4694             abi_ulong raddr;
4695             raddr = do_shmat(cpu_env, first, ptr, second);
4696             if (is_error(raddr))
4697                 return get_errno(raddr);
4698             if (put_user_ual(raddr, third))
4699                 return -TARGET_EFAULT;
4700             break;
4701         }
4702         case 1:
4703             ret = -TARGET_EINVAL;
4704             break;
4705         }
4706         break;
4707     case IPCOP_shmdt:
4708         ret = do_shmdt(ptr);
4709         break;
4710 
4711     case IPCOP_shmget:
4712         /* IPC_* flag values are the same on all linux platforms */
4713         ret = get_errno(shmget(first, second, third));
4714         break;
4715 
4716     /* IPC_* and SHM_* command values are the same on all linux platforms */
4717     case IPCOP_shmctl:
4718         ret = do_shmctl(first, second, ptr);
4719         break;
4720     default:
4721         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4722                       call, version);
4723         ret = -TARGET_ENOSYS;
4724         break;
4725     }
4726     return ret;
4727 }
4728 #endif
4729 
4730 /* kernel structure types definitions */
4731 
4732 #define STRUCT(name, ...) STRUCT_ ## name,
4733 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4734 enum {
4735 #include "syscall_types.h"
4736 STRUCT_MAX
4737 };
4738 #undef STRUCT
4739 #undef STRUCT_SPECIAL
4740 
4741 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4742 #define STRUCT_SPECIAL(name)
4743 #include "syscall_types.h"
4744 #undef STRUCT
4745 #undef STRUCT_SPECIAL
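/*
 * syscall_types.h is included twice: the first pass turns each STRUCT()
 * entry into a STRUCT_* enum value, and the second pass emits the matching
 * struct_*_def[] argtype descriptor used by the thunk converter.
 */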
4746 
4747 #define MAX_STRUCT_SIZE 4096
4748 
4749 #ifdef CONFIG_FIEMAP
4750 /* So fiemap access checks don't overflow on 32 bit systems.
4751  * This is very slightly smaller than the limit imposed by
4752  * the underlying kernel.
4753  */
4754 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4755                             / sizeof(struct fiemap_extent))
4756 
4757 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4758                                        int fd, int cmd, abi_long arg)
4759 {
4760     /* The parameter for this ioctl is a struct fiemap followed
4761      * by an array of struct fiemap_extent whose size is set
4762      * in fiemap->fm_extent_count. The array is filled in by the
4763      * ioctl.
4764      */
4765     int target_size_in, target_size_out;
4766     struct fiemap *fm;
4767     const argtype *arg_type = ie->arg_type;
4768     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4769     void *argptr, *p;
4770     abi_long ret;
4771     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4772     uint32_t outbufsz;
4773     int free_fm = 0;
4774 
4775     assert(arg_type[0] == TYPE_PTR);
4776     assert(ie->access == IOC_RW);
4777     arg_type++;
4778     target_size_in = thunk_type_size(arg_type, 0);
4779     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4780     if (!argptr) {
4781         return -TARGET_EFAULT;
4782     }
4783     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4784     unlock_user(argptr, arg, 0);
4785     fm = (struct fiemap *)buf_temp;
4786     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4787         return -TARGET_EINVAL;
4788     }
4789 
4790     outbufsz = sizeof (*fm) +
4791         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4792 
4793     if (outbufsz > MAX_STRUCT_SIZE) {
4794         /* We can't fit all the extents into the fixed size buffer.
4795          * Allocate one that is large enough and use it instead.
4796          */
4797         fm = g_try_malloc(outbufsz);
4798         if (!fm) {
4799             return -TARGET_ENOMEM;
4800         }
4801         memcpy(fm, buf_temp, sizeof(struct fiemap));
4802         free_fm = 1;
4803     }
4804     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4805     if (!is_error(ret)) {
4806         target_size_out = target_size_in;
4807         /* An extent_count of 0 means we were only counting the extents
4808          * so there are no structs to copy
4809          */
4810         if (fm->fm_extent_count != 0) {
4811             target_size_out += fm->fm_mapped_extents * extent_size;
4812         }
4813         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4814         if (!argptr) {
4815             ret = -TARGET_EFAULT;
4816         } else {
4817             /* Convert the struct fiemap */
4818             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4819             if (fm->fm_extent_count != 0) {
4820                 p = argptr + target_size_in;
4821                 /* ...and then all the struct fiemap_extents */
4822                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4823                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4824                                   THUNK_TARGET);
4825                     p += extent_size;
4826                 }
4827             }
4828             unlock_user(argptr, arg, target_size_out);
4829         }
4830     }
4831     if (free_fm) {
4832         g_free(fm);
4833     }
4834     return ret;
4835 }
4836 #endif
4837 
4838 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4839                                 int fd, int cmd, abi_long arg)
4840 {
4841     const argtype *arg_type = ie->arg_type;
4842     int target_size;
4843     void *argptr;
4844     int ret;
4845     struct ifconf *host_ifconf;
4846     uint32_t outbufsz;
4847     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4848     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4849     int target_ifreq_size;
4850     int nb_ifreq;
4851     int free_buf = 0;
4852     int i;
4853     int target_ifc_len;
4854     abi_long target_ifc_buf;
4855     int host_ifc_len;
4856     char *host_ifc_buf;
4857 
4858     assert(arg_type[0] == TYPE_PTR);
4859     assert(ie->access == IOC_RW);
4860 
4861     arg_type++;
4862     target_size = thunk_type_size(arg_type, 0);
4863 
4864     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4865     if (!argptr)
4866         return -TARGET_EFAULT;
4867     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4868     unlock_user(argptr, arg, 0);
4869 
4870     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4871     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4872     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
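    /*
     * Sizes differ between target and host: the number of entries is
     * derived from the target ifc_len using the largest target ifreq
     * layout (ifreq_max_type), while the host buffer is sized from the
     * host's struct ifreq.
     */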
4873 
4874     if (target_ifc_buf != 0) {
4875         target_ifc_len = host_ifconf->ifc_len;
4876         nb_ifreq = target_ifc_len / target_ifreq_size;
4877         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4878 
4879         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4880         if (outbufsz > MAX_STRUCT_SIZE) {
4881             /*
4882              * We can't fit all the ifreq entries into the fixed size buffer.
4883              * Allocate one that is large enough and use it instead.
4884              */
4885             host_ifconf = g_try_malloc(outbufsz);
4886             if (!host_ifconf) {
4887                 return -TARGET_ENOMEM;
4888             }
4889             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4890             free_buf = 1;
4891         }
4892         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4893 
4894         host_ifconf->ifc_len = host_ifc_len;
4895     } else {
4896         host_ifc_buf = NULL;
4897     }
4898     host_ifconf->ifc_buf = host_ifc_buf;
4899 
4900     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4901     if (!is_error(ret)) {
4902         /* convert host ifc_len to target ifc_len */
4903 
4904         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4905         target_ifc_len = nb_ifreq * target_ifreq_size;
4906         host_ifconf->ifc_len = target_ifc_len;
4907 
4908         /* restore target ifc_buf */
4909 
4910         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4911 
4912         /* copy struct ifconf to target user */
4913 
4914         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4915         if (!argptr)
4916             return -TARGET_EFAULT;
4917         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4918         unlock_user(argptr, arg, target_size);
4919 
4920         if (target_ifc_buf != 0) {
4921             /* copy ifreq[] to target user */
4922             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4923             for (i = 0; i < nb_ifreq ; i++) {
4924                 thunk_convert(argptr + i * target_ifreq_size,
4925                               host_ifc_buf + i * sizeof(struct ifreq),
4926                               ifreq_arg_type, THUNK_TARGET);
4927             }
4928             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4929         }
4930     }
4931 
4932     if (free_buf) {
4933         g_free(host_ifconf);
4934     }
4935 
4936     return ret;
4937 }
4938 
4939 #if defined(CONFIG_USBFS)
4940 #if HOST_LONG_BITS > 64
4941 #error USBDEVFS thunks do not support >64 bit hosts yet.
4942 #endif
4943 struct live_urb {
4944     uint64_t target_urb_adr;
4945     uint64_t target_buf_adr;
4946     char *target_buf_ptr;
4947     struct usbdevfs_urb host_urb;
4948 };
4949 
4950 static GHashTable *usbdevfs_urb_hashtable(void)
4951 {
4952     static GHashTable *urb_hashtable;
4953 
4954     if (!urb_hashtable) {
4955         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4956     }
4957     return urb_hashtable;
4958 }
4959 
4960 static void urb_hashtable_insert(struct live_urb *urb)
4961 {
4962     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4963     g_hash_table_insert(urb_hashtable, urb, urb);
4964 }
4965 
4966 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4967 {
4968     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4969     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4970 }
4971 
4972 static void urb_hashtable_remove(struct live_urb *urb)
4973 {
4974     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4975     g_hash_table_remove(urb_hashtable, urb);
4976 }
4977 
4978 static abi_long
4979 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4980                           int fd, int cmd, abi_long arg)
4981 {
4982     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4983     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4984     struct live_urb *lurb;
4985     void *argptr;
4986     uint64_t hurb;
4987     int target_size;
4988     uintptr_t target_urb_adr;
4989     abi_long ret;
4990 
4991     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4992 
4993     memset(buf_temp, 0, sizeof(uint64_t));
4994     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4995     if (is_error(ret)) {
4996         return ret;
4997     }
4998 
4999     memcpy(&hurb, buf_temp, sizeof(uint64_t));
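    /*
     * The kernel returned a pointer to our embedded host_urb; step back by
     * its offset (container_of style) to recover the live_urb wrapper that
     * holds the guest addresses.
     */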
5000     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5001     if (!lurb->target_urb_adr) {
5002         return -TARGET_EFAULT;
5003     }
5004     urb_hashtable_remove(lurb);
5005     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5006         lurb->host_urb.buffer_length);
5007     lurb->target_buf_ptr = NULL;
5008 
5009     /* restore the guest buffer pointer */
5010     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5011 
5012     /* update the guest urb struct */
5013     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5014     if (!argptr) {
5015         g_free(lurb);
5016         return -TARGET_EFAULT;
5017     }
5018     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5019     unlock_user(argptr, lurb->target_urb_adr, target_size);
5020 
5021     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5022     /* write back the urb handle */
5023     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5024     if (!argptr) {
5025         g_free(lurb);
5026         return -TARGET_EFAULT;
5027     }
5028 
5029     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5030     target_urb_adr = lurb->target_urb_adr;
5031     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5032     unlock_user(argptr, arg, target_size);
5033 
5034     g_free(lurb);
5035     return ret;
5036 }
5037 
5038 static abi_long
5039 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5040                              uint8_t *buf_temp __attribute__((unused)),
5041                              int fd, int cmd, abi_long arg)
5042 {
5043     struct live_urb *lurb;
5044 
5045     /* map target address back to host URB with metadata. */
5046     lurb = urb_hashtable_lookup(arg);
5047     if (!lurb) {
5048         return -TARGET_EFAULT;
5049     }
5050     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5051 }
5052 
5053 static abi_long
5054 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5055                             int fd, int cmd, abi_long arg)
5056 {
5057     const argtype *arg_type = ie->arg_type;
5058     int target_size;
5059     abi_long ret;
5060     void *argptr;
5061     int rw_dir;
5062     struct live_urb *lurb;
5063 
5064     /*
5065      * Each submitted URB needs to map to a unique ID for the
5066      * kernel, and that unique ID needs to be a pointer to
5067      * host memory.  Hence, we need to malloc for each URB.
5068      * Isochronous transfers have a variable-length struct.
5069      */
5070     arg_type++;
5071     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5072 
5073     /* construct host copy of urb and metadata */
5074     lurb = g_try_new0(struct live_urb, 1);
5075     if (!lurb) {
5076         return -TARGET_ENOMEM;
5077     }
5078 
5079     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5080     if (!argptr) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5085     unlock_user(argptr, arg, 0);
5086 
5087     lurb->target_urb_adr = arg;
5088     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5089 
5090     /* buffer space used depends on endpoint type so lock the entire buffer */
5091     /* control type urbs should check the buffer contents for true direction */
5092     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5093     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5094         lurb->host_urb.buffer_length, 1);
5095     if (lurb->target_buf_ptr == NULL) {
5096         g_free(lurb);
5097         return -TARGET_EFAULT;
5098     }
5099 
5100     /* update buffer pointer in host copy */
5101     lurb->host_urb.buffer = lurb->target_buf_ptr;
5102 
5103     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5104     if (is_error(ret)) {
5105         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5106         g_free(lurb);
5107     } else {
5108         urb_hashtable_insert(lurb);
5109     }
5110 
5111     return ret;
5112 }
5113 #endif /* CONFIG_USBFS */
5114 
5115 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5116                             int cmd, abi_long arg)
5117 {
5118     void *argptr;
5119     struct dm_ioctl *host_dm;
5120     abi_long guest_data;
5121     uint32_t guest_data_size;
5122     int target_size;
5123     const argtype *arg_type = ie->arg_type;
5124     abi_long ret;
5125     void *big_buf = NULL;
5126     char *host_data;
5127 
5128     arg_type++;
5129     target_size = thunk_type_size(arg_type, 0);
5130     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5131     if (!argptr) {
5132         ret = -TARGET_EFAULT;
5133         goto out;
5134     }
5135     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5136     unlock_user(argptr, arg, 0);
5137 
5138     /* buf_temp is too small, so fetch things into a bigger buffer */
5139     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5140     memcpy(big_buf, buf_temp, target_size);
5141     buf_temp = big_buf;
5142     host_dm = big_buf;
5143 
5144     guest_data = arg + host_dm->data_start;
5145     if ((guest_data - arg) < 0) {
5146         ret = -TARGET_EINVAL;
5147         goto out;
5148     }
5149     guest_data_size = host_dm->data_size - host_dm->data_start;
5150     host_data = (char*)host_dm + host_dm->data_start;
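    /*
     * Layout of a dm_ioctl request: a fixed struct dm_ioctl header followed
     * by a variable payload; data_start is the offset of that payload from
     * the start of the struct, so the guest's payload lives at
     * arg + data_start and the host copy at (char *)host_dm + data_start.
     */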
5151 
5152     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5153     if (!argptr) {
5154         ret = -TARGET_EFAULT;
5155         goto out;
5156     }
5157 
5158     switch (ie->host_cmd) {
5159     case DM_REMOVE_ALL:
5160     case DM_LIST_DEVICES:
5161     case DM_DEV_CREATE:
5162     case DM_DEV_REMOVE:
5163     case DM_DEV_SUSPEND:
5164     case DM_DEV_STATUS:
5165     case DM_DEV_WAIT:
5166     case DM_TABLE_STATUS:
5167     case DM_TABLE_CLEAR:
5168     case DM_TABLE_DEPS:
5169     case DM_LIST_VERSIONS:
5170         /* no input data */
5171         break;
5172     case DM_DEV_RENAME:
5173     case DM_DEV_SET_GEOMETRY:
5174         /* data contains only strings */
5175         memcpy(host_data, argptr, guest_data_size);
5176         break;
5177     case DM_TARGET_MSG:
5178         memcpy(host_data, argptr, guest_data_size);
5179         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5180         break;
5181     case DM_TABLE_LOAD:
5182     {
5183         void *gspec = argptr;
5184         void *cur_data = host_data;
5185         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5186         int spec_size = thunk_type_size(arg_type, 0);
5187         int i;
5188 
5189         for (i = 0; i < host_dm->target_count; i++) {
5190             struct dm_target_spec *spec = cur_data;
5191             uint32_t next;
5192             int slen;
5193 
5194             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5195             slen = strlen((char*)gspec + spec_size) + 1;
5196             next = spec->next;
5197             spec->next = sizeof(*spec) + slen;
5198             strcpy((char*)&spec[1], gspec + spec_size);
5199             gspec += next;
5200             cur_data += spec->next;
5201         }
5202         break;
5203     }
5204     default:
5205         ret = -TARGET_EINVAL;
5206         unlock_user(argptr, guest_data, 0);
5207         goto out;
5208     }
5209     unlock_user(argptr, guest_data, 0);
5210 
5211     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5212     if (!is_error(ret)) {
5213         guest_data = arg + host_dm->data_start;
5214         guest_data_size = host_dm->data_size - host_dm->data_start;
5215         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5216         switch (ie->host_cmd) {
5217         case DM_REMOVE_ALL:
5218         case DM_DEV_CREATE:
5219         case DM_DEV_REMOVE:
5220         case DM_DEV_RENAME:
5221         case DM_DEV_SUSPEND:
5222         case DM_DEV_STATUS:
5223         case DM_TABLE_LOAD:
5224         case DM_TABLE_CLEAR:
5225         case DM_TARGET_MSG:
5226         case DM_DEV_SET_GEOMETRY:
5227             /* no return data */
5228             break;
5229         case DM_LIST_DEVICES:
5230         {
5231             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5232             uint32_t remaining_data = guest_data_size;
5233             void *cur_data = argptr;
5234             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5235             int nl_size = 12; /* can't use thunk_size due to alignment */
5236 
5237             while (1) {
5238                 uint32_t next = nl->next;
5239                 if (next) {
5240                     nl->next = nl_size + (strlen(nl->name) + 1);
5241                 }
5242                 if (remaining_data < nl->next) {
5243                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5244                     break;
5245                 }
5246                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5247                 strcpy(cur_data + nl_size, nl->name);
5248                 cur_data += nl->next;
5249                 remaining_data -= nl->next;
5250                 if (!next) {
5251                     break;
5252                 }
5253                 nl = (void*)nl + next;
5254             }
5255             break;
5256         }
5257         case DM_DEV_WAIT:
5258         case DM_TABLE_STATUS:
5259         {
5260             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5261             void *cur_data = argptr;
5262             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5263             int spec_size = thunk_type_size(arg_type, 0);
5264             int i;
5265 
5266             for (i = 0; i < host_dm->target_count; i++) {
5267                 uint32_t next = spec->next;
5268                 int slen = strlen((char*)&spec[1]) + 1;
5269                 spec->next = (cur_data - argptr) + spec_size + slen;
5270                 if (guest_data_size < spec->next) {
5271                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5272                     break;
5273                 }
5274                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5275                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5276                 cur_data = argptr + spec->next;
5277                 spec = (void*)host_dm + host_dm->data_start + next;
5278             }
5279             break;
5280         }
5281         case DM_TABLE_DEPS:
5282         {
5283             void *hdata = (void*)host_dm + host_dm->data_start;
5284             int count = *(uint32_t*)hdata;
5285             uint64_t *hdev = hdata + 8;
5286             uint64_t *gdev = argptr + 8;
5287             int i;
5288 
5289             *(uint32_t*)argptr = tswap32(count);
5290             for (i = 0; i < count; i++) {
5291                 *gdev = tswap64(*hdev);
5292                 gdev++;
5293                 hdev++;
5294             }
5295             break;
5296         }
5297         case DM_LIST_VERSIONS:
5298         {
5299             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5300             uint32_t remaining_data = guest_data_size;
5301             void *cur_data = argptr;
5302             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5303             int vers_size = thunk_type_size(arg_type, 0);
5304 
5305             while (1) {
5306                 uint32_t next = vers->next;
5307                 if (next) {
5308                     vers->next = vers_size + (strlen(vers->name) + 1);
5309                 }
5310                 if (remaining_data < vers->next) {
5311                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5312                     break;
5313                 }
5314                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5315                 strcpy(cur_data + vers_size, vers->name);
5316                 cur_data += vers->next;
5317                 remaining_data -= vers->next;
5318                 if (!next) {
5319                     break;
5320                 }
5321                 vers = (void*)vers + next;
5322             }
5323             break;
5324         }
5325         default:
5326             unlock_user(argptr, guest_data, 0);
5327             ret = -TARGET_EINVAL;
5328             goto out;
5329         }
5330         unlock_user(argptr, guest_data, guest_data_size);
5331 
5332         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5333         if (!argptr) {
5334             ret = -TARGET_EFAULT;
5335             goto out;
5336         }
5337         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5338         unlock_user(argptr, arg, target_size);
5339     }
5340 out:
5341     g_free(big_buf);
5342     return ret;
5343 }
5344 
5345 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5346                                int cmd, abi_long arg)
5347 {
5348     void *argptr;
5349     int target_size;
5350     const argtype *arg_type = ie->arg_type;
5351     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5352     abi_long ret;
5353 
5354     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5355     struct blkpg_partition host_part;
5356 
5357     /* Read and convert blkpg */
5358     arg_type++;
5359     target_size = thunk_type_size(arg_type, 0);
5360     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5361     if (!argptr) {
5362         ret = -TARGET_EFAULT;
5363         goto out;
5364     }
5365     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5366     unlock_user(argptr, arg, 0);
5367 
5368     switch (host_blkpg->op) {
5369     case BLKPG_ADD_PARTITION:
5370     case BLKPG_DEL_PARTITION:
5371         /* payload is struct blkpg_partition */
5372         break;
5373     default:
5374         /* Unknown opcode */
5375         ret = -TARGET_EINVAL;
5376         goto out;
5377     }
5378 
5379     /* Read and convert blkpg->data */
5380     arg = (abi_long)(uintptr_t)host_blkpg->data;
5381     target_size = thunk_type_size(part_arg_type, 0);
5382     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5383     if (!argptr) {
5384         ret = -TARGET_EFAULT;
5385         goto out;
5386     }
5387     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5388     unlock_user(argptr, arg, 0);
5389 
5390     /* Swizzle the data pointer to our local copy and call! */
5391     host_blkpg->data = &host_part;
5392     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5393 
5394 out:
5395     return ret;
5396 }
5397 
5398 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5399                                 int fd, int cmd, abi_long arg)
5400 {
5401     const argtype *arg_type = ie->arg_type;
5402     const StructEntry *se;
5403     const argtype *field_types;
5404     const int *dst_offsets, *src_offsets;
5405     int target_size;
5406     void *argptr;
5407     abi_ulong *target_rt_dev_ptr = NULL;
5408     unsigned long *host_rt_dev_ptr = NULL;
5409     abi_long ret;
5410     int i;
5411 
5412     assert(ie->access == IOC_W);
5413     assert(*arg_type == TYPE_PTR);
5414     arg_type++;
5415     assert(*arg_type == TYPE_STRUCT);
5416     target_size = thunk_type_size(arg_type, 0);
5417     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5418     if (!argptr) {
5419         return -TARGET_EFAULT;
5420     }
5421     arg_type++;
5422     assert(*arg_type == (int)STRUCT_rtentry);
5423     se = struct_entries + *arg_type++;
5424     assert(se->convert[0] == NULL);
5425     /* convert struct here to be able to catch rt_dev string */
5426     field_types = se->field_types;
5427     dst_offsets = se->field_offsets[THUNK_HOST];
5428     src_offsets = se->field_offsets[THUNK_TARGET];
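    /*
     * Walk the rtentry fields one by one so the rt_dev member can be
     * special-cased: it is a pointer to a device name string in guest
     * memory and must be locked into host memory rather than converted
     * as a plain value.
     */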
5429     for (i = 0; i < se->nb_fields; i++) {
5430         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5431             assert(*field_types == TYPE_PTRVOID);
5432             target_rt_dev_ptr = argptr + src_offsets[i];
5433             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5434             if (*target_rt_dev_ptr != 0) {
5435                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5436                                                   tswapal(*target_rt_dev_ptr));
5437                 if (!*host_rt_dev_ptr) {
5438                     unlock_user(argptr, arg, 0);
5439                     return -TARGET_EFAULT;
5440                 }
5441             } else {
5442                 *host_rt_dev_ptr = 0;
5443             }
5444             field_types++;
5445             continue;
5446         }
5447         field_types = thunk_convert(buf_temp + dst_offsets[i],
5448                                     argptr + src_offsets[i],
5449                                     field_types, THUNK_HOST);
5450     }
5451     unlock_user(argptr, arg, 0);
5452 
5453     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5454 
5455     assert(host_rt_dev_ptr != NULL);
5456     assert(target_rt_dev_ptr != NULL);
5457     if (*host_rt_dev_ptr != 0) {
5458         unlock_user((void *)*host_rt_dev_ptr,
5459                     *target_rt_dev_ptr, 0);
5460     }
5461     return ret;
5462 }
5463 
5464 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5465                                      int fd, int cmd, abi_long arg)
5466 {
5467     int sig = target_to_host_signal(arg);
5468     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5469 }
5470 
5471 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5472                                     int fd, int cmd, abi_long arg)
5473 {
5474     struct timeval tv;
5475     abi_long ret;
5476 
5477     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5478     if (is_error(ret)) {
5479         return ret;
5480     }
5481 
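    /*
     * TARGET_SIOCGSTAMP_OLD uses the target's traditional struct timeval;
     * otherwise the 64-bit time layout is written back via
     * copy_to_user_timeval64().
     */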
5482     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5483         if (copy_to_user_timeval(arg, &tv)) {
5484             return -TARGET_EFAULT;
5485         }
5486     } else {
5487         if (copy_to_user_timeval64(arg, &tv)) {
5488             return -TARGET_EFAULT;
5489         }
5490     }
5491 
5492     return ret;
5493 }
5494 
5495 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5496                                       int fd, int cmd, abi_long arg)
5497 {
5498     struct timespec ts;
5499     abi_long ret;
5500 
5501     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5502     if (is_error(ret)) {
5503         return ret;
5504     }
5505 
5506     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5507         if (host_to_target_timespec(arg, &ts)) {
5508             return -TARGET_EFAULT;
5509         }
5510     } else {
5511         if (host_to_target_timespec64(arg, &ts)) {
5512             return -TARGET_EFAULT;
5513         }
5514     }
5515 
5516     return ret;
5517 }
5518 
5519 #ifdef TIOCGPTPEER
5520 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                      int fd, int cmd, abi_long arg)
5522 {
5523     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5524     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5525 }
5526 #endif
5527 
5528 #ifdef HAVE_DRM_H
5529 
5530 static void unlock_drm_version(struct drm_version *host_ver,
5531                                struct target_drm_version *target_ver,
5532                                bool copy)
5533 {
5534     unlock_user(host_ver->name, target_ver->name,
5535                                 copy ? host_ver->name_len : 0);
5536     unlock_user(host_ver->date, target_ver->date,
5537                                 copy ? host_ver->date_len : 0);
5538     unlock_user(host_ver->desc, target_ver->desc,
5539                                 copy ? host_ver->desc_len : 0);
5540 }
5541 
5542 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5543                                           struct target_drm_version *target_ver)
5544 {
5545     memset(host_ver, 0, sizeof(*host_ver));
5546 
5547     __get_user(host_ver->name_len, &target_ver->name_len);
5548     if (host_ver->name_len) {
5549         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5550                                    target_ver->name_len, 0);
5551         if (!host_ver->name) {
5552             return -EFAULT;
5553         }
5554     }
5555 
5556     __get_user(host_ver->date_len, &target_ver->date_len);
5557     if (host_ver->date_len) {
5558         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5559                                    target_ver->date_len, 0);
5560         if (!host_ver->date) {
5561             goto err;
5562         }
5563     }
5564 
5565     __get_user(host_ver->desc_len, &target_ver->desc_len);
5566     if (host_ver->desc_len) {
5567         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5568                                    target_ver->desc_len, 0);
5569         if (!host_ver->desc) {
5570             goto err;
5571         }
5572     }
5573 
5574     return 0;
5575 err:
5576     unlock_drm_version(host_ver, target_ver, false);
5577     return -EFAULT;
5578 }
5579 
5580 static inline void host_to_target_drmversion(
5581                                           struct target_drm_version *target_ver,
5582                                           struct drm_version *host_ver)
5583 {
5584     __put_user(host_ver->version_major, &target_ver->version_major);
5585     __put_user(host_ver->version_minor, &target_ver->version_minor);
5586     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5587     __put_user(host_ver->name_len, &target_ver->name_len);
5588     __put_user(host_ver->date_len, &target_ver->date_len);
5589     __put_user(host_ver->desc_len, &target_ver->desc_len);
5590     unlock_drm_version(host_ver, target_ver, true);
5591 }
5592 
5593 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5594                              int fd, int cmd, abi_long arg)
5595 {
5596     struct drm_version *ver;
5597     struct target_drm_version *target_ver;
5598     abi_long ret;
5599 
5600     switch (ie->host_cmd) {
5601     case DRM_IOCTL_VERSION:
5602         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5603             return -TARGET_EFAULT;
5604         }
5605         ver = (struct drm_version *)buf_temp;
5606         ret = target_to_host_drmversion(ver, target_ver);
5607         if (!is_error(ret)) {
5608             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5609             if (is_error(ret)) {
5610                 unlock_drm_version(ver, target_ver, false);
5611             } else {
5612                 host_to_target_drmversion(target_ver, ver);
5613             }
5614         }
5615         unlock_user_struct(target_ver, arg, 0);
5616         return ret;
5617     }
5618     return -TARGET_ENOSYS;
5619 }
5620 
5621 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5622                                            struct drm_i915_getparam *gparam,
5623                                            int fd, abi_long arg)
5624 {
5625     abi_long ret;
5626     int value;
5627     struct target_drm_i915_getparam *target_gparam;
5628 
5629     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5630         return -TARGET_EFAULT;
5631     }
5632 
5633     __get_user(gparam->param, &target_gparam->param);
5634     gparam->value = &value;
5635     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5636     put_user_s32(value, target_gparam->value);
5637 
5638     unlock_user_struct(target_gparam, arg, 0);
5639     return ret;
5640 }
5641 
5642 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                                   int fd, int cmd, abi_long arg)
5644 {
5645     switch (ie->host_cmd) {
5646     case DRM_IOCTL_I915_GETPARAM:
5647         return do_ioctl_drm_i915_getparam(ie,
5648                                           (struct drm_i915_getparam *)buf_temp,
5649                                           fd, arg);
5650     default:
5651         return -TARGET_ENOSYS;
5652     }
5653 }
5654 
5655 #endif
5656 
5657 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5658                                         int fd, int cmd, abi_long arg)
5659 {
5660     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5661     struct tun_filter *target_filter;
5662     char *target_addr;
5663 
5664     assert(ie->access == IOC_W);
5665 
5666     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5667     if (!target_filter) {
5668         return -TARGET_EFAULT;
5669     }
5670     filter->flags = tswap16(target_filter->flags);
5671     filter->count = tswap16(target_filter->count);
5672     unlock_user(target_filter, arg, 0);
5673 
5674     if (filter->count) {
5675         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5676             MAX_STRUCT_SIZE) {
5677             return -TARGET_EFAULT;
5678         }
5679 
5680         target_addr = lock_user(VERIFY_READ,
5681                                 arg + offsetof(struct tun_filter, addr),
5682                                 filter->count * ETH_ALEN, 1);
5683         if (!target_addr) {
5684             return -TARGET_EFAULT;
5685         }
5686         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5687         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5688     }
5689 
5690     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5691 }
5692 
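/*
 * The ioctl table is generated from ioctls.h: IOCTL() describes an entry
 * converted generically through the thunk machinery, IOCTL_SPECIAL()
 * routes the request to a custom do_ioctl_* helper, and IOCTL_IGNORE()
 * lists a target command that is recognised but has no host implementation,
 * so it fails with ENOSYS without the "Unsupported ioctl" warning.
 */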
5693 IOCTLEntry ioctl_entries[] = {
5694 #define IOCTL(cmd, access, ...) \
5695     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5696 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5697     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5698 #define IOCTL_IGNORE(cmd) \
5699     { TARGET_ ## cmd, 0, #cmd },
5700 #include "ioctls.h"
5701     { 0, 0, },
5702 };
5703 
5704 /* ??? Implement proper locking for ioctls.  */
5705 /* do_ioctl() must return target values and target errnos. */
5706 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5707 {
5708     const IOCTLEntry *ie;
5709     const argtype *arg_type;
5710     abi_long ret;
5711     uint8_t buf_temp[MAX_STRUCT_SIZE];
5712     int target_size;
5713     void *argptr;
5714 
5715     ie = ioctl_entries;
5716     for (;;) {
5717         if (ie->target_cmd == 0) {
5718             qemu_log_mask(
5719                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5720             return -TARGET_ENOSYS;
5721         }
5722         if (ie->target_cmd == cmd)
5723             break;
5724         ie++;
5725     }
5726     arg_type = ie->arg_type;
5727     if (ie->do_ioctl) {
5728         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5729     } else if (!ie->host_cmd) {
5730         /* Some architectures define BSD ioctls in their headers
5731            that are not implemented in Linux.  */
5732         return -TARGET_ENOSYS;
5733     }
5734 
5735     switch (arg_type[0]) {
5736     case TYPE_NULL:
5737         /* no argument */
5738         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5739         break;
5740     case TYPE_PTRVOID:
5741     case TYPE_INT:
5742     case TYPE_LONG:
5743     case TYPE_ULONG:
5744         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5745         break;
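    /*
     * Pointer arguments are deep-converted: depending on the declared
     * access direction the guest struct is thunked into buf_temp before
     * the host ioctl (IOC_W), thunked back afterwards (IOC_R), or both
     * (IOC_RW).
     */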
5746     case TYPE_PTR:
5747         arg_type++;
5748         target_size = thunk_type_size(arg_type, 0);
5749         switch (ie->access) {
5750         case IOC_R:
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             if (!is_error(ret)) {
5753                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5754                 if (!argptr)
5755                     return -TARGET_EFAULT;
5756                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5757                 unlock_user(argptr, arg, target_size);
5758             }
5759             break;
5760         case IOC_W:
5761             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5762             if (!argptr)
5763                 return -TARGET_EFAULT;
5764             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5765             unlock_user(argptr, arg, 0);
5766             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5767             break;
5768         default:
5769         case IOC_RW:
5770             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5771             if (!argptr)
5772                 return -TARGET_EFAULT;
5773             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5774             unlock_user(argptr, arg, 0);
5775             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5776             if (!is_error(ret)) {
5777                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5778                 if (!argptr)
5779                     return -TARGET_EFAULT;
5780                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5781                 unlock_user(argptr, arg, target_size);
5782             }
5783             break;
5784         }
5785         break;
5786     default:
5787         qemu_log_mask(LOG_UNIMP,
5788                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5789                       (long)cmd, arg_type[0]);
5790         ret = -TARGET_ENOSYS;
5791         break;
5792     }
5793     return ret;
5794 }
5795 
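/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() sets host_bits whenever the masked
 * target value equals target_bits, and host_to_target_bitmask() does the
 * reverse.
 */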
5796 static const bitmask_transtbl iflag_tbl[] = {
5797         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5798         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5799         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5800         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5801         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5802         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5803         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5804         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5805         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5806         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5807         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5808         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5809         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5810         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5811         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5812         { 0, 0, 0, 0 }
5813 };
5814 
5815 static const bitmask_transtbl oflag_tbl[] = {
5816 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5817 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5818 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5819 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5820 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5821 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5822 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5823 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5824 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5825 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5826 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5827 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5828 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5829 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5830 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5831 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5832 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5833 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5834 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5835 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5836 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5837 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5838 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5839 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5840 	{ 0, 0, 0, 0 }
5841 };
5842 
5843 static const bitmask_transtbl cflag_tbl[] = {
5844 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5845 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5846 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5847 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5848 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5849 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5850 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5851 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5852 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5853 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5854 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5855 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5856 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5857 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5858 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5859 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5860 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5861 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5862 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5863 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5864 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5865 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5866 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5867 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5868 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5869 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5870 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5871 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5872 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5873 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5874 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5875 	{ 0, 0, 0, 0 }
5876 };
5877 
5878 static const bitmask_transtbl lflag_tbl[] = {
5879   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5880   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5881   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5882   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5883   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5884   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5885   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5886   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5887   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5888   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5889   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5890   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5891   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5892   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5893   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5894   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5895   { 0, 0, 0, 0 }
5896 };
5897 
5898 static void target_to_host_termios (void *dst, const void *src)
5899 {
5900     struct host_termios *host = dst;
5901     const struct target_termios *target = src;
5902 
5903     host->c_iflag =
5904         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5905     host->c_oflag =
5906         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5907     host->c_cflag =
5908         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5909     host->c_lflag =
5910         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5911     host->c_line = target->c_line;
5912 
5913     memset(host->c_cc, 0, sizeof(host->c_cc));
5914     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5915     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5916     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5917     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5918     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5919     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5920     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5921     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5922     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5923     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5924     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5925     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5926     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5927     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5928     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5929     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5930     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5931 }
5932 
5933 static void host_to_target_termios (void *dst, const void *src)
5934 {
5935     struct target_termios *target = dst;
5936     const struct host_termios *host = src;
5937 
5938     target->c_iflag =
5939         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5940     target->c_oflag =
5941         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5942     target->c_cflag =
5943         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5944     target->c_lflag =
5945         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5946     target->c_line = host->c_line;
5947 
5948     memset(target->c_cc, 0, sizeof(target->c_cc));
5949     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5950     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5951     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5952     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5953     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5954     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5955     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5956     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5957     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5958     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5959     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5960     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5961     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5962     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5963     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5964     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5965     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5966 }
5967 
5968 static const StructEntry struct_termios_def = {
5969     .convert = { host_to_target_termios, target_to_host_termios },
5970     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5971     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5972     .print = print_termios,
5973 };
5974 
5975 static const bitmask_transtbl mmap_flags_tbl[] = {
5976     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5977     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5978     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5979     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5980       MAP_ANONYMOUS, MAP_ANONYMOUS },
5981     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5982       MAP_GROWSDOWN, MAP_GROWSDOWN },
5983     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5984       MAP_DENYWRITE, MAP_DENYWRITE },
5985     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5986       MAP_EXECUTABLE, MAP_EXECUTABLE },
5987     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5988     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5989       MAP_NORESERVE, MAP_NORESERVE },
5990     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5991     /* MAP_STACK had been ignored by the kernel for quite some time.
5992        Recognize it for the target insofar as we do not want to pass
5993        it through to the host.  */
5994     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5995     { 0, 0, 0, 0 }
5996 };
5997 
5998 /*
5999  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6000  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6001  */
6002 #if defined(TARGET_I386)
6003 
6004 /* NOTE: there is really one LDT for all the threads */
6005 static uint8_t *ldt_table;
6006 
6007 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6008 {
6009     int size;
6010     void *p;
6011 
6012     if (!ldt_table)
6013         return 0;
6014     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6015     if (size > bytecount)
6016         size = bytecount;
6017     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6018     if (!p)
6019         return -TARGET_EFAULT;
6020     /* ??? Should this by byteswapped?  */
6021     memcpy(p, ldt_table, size);
6022     unlock_user(p, ptr, size);
6023     return size;
6024 }
6025 
6026 /* XXX: add locking support */
6027 static abi_long write_ldt(CPUX86State *env,
6028                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6029 {
6030     struct target_modify_ldt_ldt_s ldt_info;
6031     struct target_modify_ldt_ldt_s *target_ldt_info;
6032     int seg_32bit, contents, read_exec_only, limit_in_pages;
6033     int seg_not_present, useable, lm;
6034     uint32_t *lp, entry_1, entry_2;
6035 
6036     if (bytecount != sizeof(ldt_info))
6037         return -TARGET_EINVAL;
6038     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6039         return -TARGET_EFAULT;
6040     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6041     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6042     ldt_info.limit = tswap32(target_ldt_info->limit);
6043     ldt_info.flags = tswap32(target_ldt_info->flags);
6044     unlock_user_struct(target_ldt_info, ptr, 0);
6045 
6046     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6047         return -TARGET_EINVAL;
6048     seg_32bit = ldt_info.flags & 1;
6049     contents = (ldt_info.flags >> 1) & 3;
6050     read_exec_only = (ldt_info.flags >> 3) & 1;
6051     limit_in_pages = (ldt_info.flags >> 4) & 1;
6052     seg_not_present = (ldt_info.flags >> 5) & 1;
6053     useable = (ldt_info.flags >> 6) & 1;
6054 #ifdef TARGET_ABI32
6055     lm = 0;
6056 #else
6057     lm = (ldt_info.flags >> 7) & 1;
6058 #endif
6059     if (contents == 3) {
6060         if (oldmode)
6061             return -TARGET_EINVAL;
6062         if (seg_not_present == 0)
6063             return -TARGET_EINVAL;
6064     }
6065     /* allocate the LDT */
6066     if (!ldt_table) {
6067         env->ldt.base = target_mmap(0,
6068                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6069                                     PROT_READ|PROT_WRITE,
6070                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6071         if (env->ldt.base == -1)
6072             return -TARGET_ENOMEM;
6073         memset(g2h_untagged(env->ldt.base), 0,
6074                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6075         env->ldt.limit = 0xffff;
6076         ldt_table = g2h_untagged(env->ldt.base);
6077     }
6078 
6079     /* NOTE: same code as Linux kernel */
6080     /* Allow LDTs to be cleared by the user. */
6081     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6082         if (oldmode ||
6083             (contents == 0		&&
6084              read_exec_only == 1	&&
6085              seg_32bit == 0		&&
6086              limit_in_pages == 0	&&
6087              seg_not_present == 1	&&
6088              useable == 0 )) {
6089             entry_1 = 0;
6090             entry_2 = 0;
6091             goto install;
6092         }
6093     }
6094 
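    /*
     * Pack the fields into the two 32-bit halves of an x86 segment
     * descriptor: entry_1 holds limit[15:0] and base[15:0]; entry_2 holds
     * base[31:24], base[23:16], limit[19:16] and the access/flag bits
     * (0x7000 sets the descriptor-type bit and DPL=3).
     */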
6095     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6096         (ldt_info.limit & 0x0ffff);
6097     entry_2 = (ldt_info.base_addr & 0xff000000) |
6098         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6099         (ldt_info.limit & 0xf0000) |
6100         ((read_exec_only ^ 1) << 9) |
6101         (contents << 10) |
6102         ((seg_not_present ^ 1) << 15) |
6103         (seg_32bit << 22) |
6104         (limit_in_pages << 23) |
6105         (lm << 21) |
6106         0x7000;
6107     if (!oldmode)
6108         entry_2 |= (useable << 20);
6109 
6110     /* Install the new entry ...  */
6111 install:
6112     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6113     lp[0] = tswap32(entry_1);
6114     lp[1] = tswap32(entry_2);
6115     return 0;
6116 }
6117 
6118 /* specific and weird i386 syscalls */
6119 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6120                               unsigned long bytecount)
6121 {
6122     abi_long ret;
6123 
6124     switch (func) {
6125     case 0:
6126         ret = read_ldt(ptr, bytecount);
6127         break;
6128     case 1:
6129         ret = write_ldt(env, ptr, bytecount, 1);
6130         break;
6131     case 0x11:
6132         ret = write_ldt(env, ptr, bytecount, 0);
6133         break;
6134     default:
6135         ret = -TARGET_ENOSYS;
6136         break;
6137     }
6138     return ret;
6139 }
6140 
6141 #if defined(TARGET_ABI32)
6142 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6143 {
6144     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6145     struct target_modify_ldt_ldt_s ldt_info;
6146     struct target_modify_ldt_ldt_s *target_ldt_info;
6147     int seg_32bit, contents, read_exec_only, limit_in_pages;
6148     int seg_not_present, useable, lm;
6149     uint32_t *lp, entry_1, entry_2;
6150     int i;
6151 
6152     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6153     if (!target_ldt_info)
6154         return -TARGET_EFAULT;
6155     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6156     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6157     ldt_info.limit = tswap32(target_ldt_info->limit);
6158     ldt_info.flags = tswap32(target_ldt_info->flags);
6159     if (ldt_info.entry_number == -1) {
6160         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6161             if (gdt_table[i] == 0) {
6162                 ldt_info.entry_number = i;
6163                 target_ldt_info->entry_number = tswap32(i);
6164                 break;
6165             }
6166         }
6167     }
6168     unlock_user_struct(target_ldt_info, ptr, 1);
6169 
6170     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6171         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6172            return -TARGET_EINVAL;
6173     seg_32bit = ldt_info.flags & 1;
6174     contents = (ldt_info.flags >> 1) & 3;
6175     read_exec_only = (ldt_info.flags >> 3) & 1;
6176     limit_in_pages = (ldt_info.flags >> 4) & 1;
6177     seg_not_present = (ldt_info.flags >> 5) & 1;
6178     useable = (ldt_info.flags >> 6) & 1;
6179 #ifdef TARGET_ABI32
6180     lm = 0;
6181 #else
6182     lm = (ldt_info.flags >> 7) & 1;
6183 #endif
6184 
6185     if (contents == 3) {
6186         if (seg_not_present == 0)
6187             return -TARGET_EINVAL;
6188     }
6189 
6190     /* NOTE: same code as Linux kernel */
6191     /* Allow LDTs to be cleared by the user. */
6192     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6193         if ((contents == 0             &&
6194              read_exec_only == 1       &&
6195              seg_32bit == 0            &&
6196              limit_in_pages == 0       &&
6197              seg_not_present == 1      &&
6198              useable == 0 )) {
6199             entry_1 = 0;
6200             entry_2 = 0;
6201             goto install;
6202         }
6203     }
6204 
6205     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6206         (ldt_info.limit & 0x0ffff);
6207     entry_2 = (ldt_info.base_addr & 0xff000000) |
6208         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6209         (ldt_info.limit & 0xf0000) |
6210         ((read_exec_only ^ 1) << 9) |
6211         (contents << 10) |
6212         ((seg_not_present ^ 1) << 15) |
6213         (seg_32bit << 22) |
6214         (limit_in_pages << 23) |
6215         (useable << 20) |
6216         (lm << 21) |
6217         0x7000;
6218 
6219     /* Install the new entry ...  */
6220 install:
6221     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6222     lp[0] = tswap32(entry_1);
6223     lp[1] = tswap32(entry_2);
6224     return 0;
6225 }
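/*
 * Note: as with the kernel's set_thread_area(), an entry_number of -1 asks
 * for the first free TLS slot in the GDT (TARGET_GDT_ENTRY_TLS_MIN ..
 * TARGET_GDT_ENTRY_TLS_MAX); the index actually chosen is written back to
 * the guest descriptor so the guest can reuse it in later calls.
 */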
6226 
6227 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6228 {
6229     struct target_modify_ldt_ldt_s *target_ldt_info;
6230     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6231     uint32_t base_addr, limit, flags;
6232     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6233     int seg_not_present, useable, lm;
6234     uint32_t *lp, entry_1, entry_2;
6235 
6236     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6237     if (!target_ldt_info)
6238         return -TARGET_EFAULT;
6239     idx = tswap32(target_ldt_info->entry_number);
6240     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6241         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6242         unlock_user_struct(target_ldt_info, ptr, 1);
6243         return -TARGET_EINVAL;
6244     }
6245     lp = (uint32_t *)(gdt_table + idx);
6246     entry_1 = tswap32(lp[0]);
6247     entry_2 = tswap32(lp[1]);
6248 
6249     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6250     contents = (entry_2 >> 10) & 3;
6251     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6252     seg_32bit = (entry_2 >> 22) & 1;
6253     limit_in_pages = (entry_2 >> 23) & 1;
6254     useable = (entry_2 >> 20) & 1;
6255 #ifdef TARGET_ABI32
6256     lm = 0;
6257 #else
6258     lm = (entry_2 >> 21) & 1;
6259 #endif
6260     flags = (seg_32bit << 0) | (contents << 1) |
6261         (read_exec_only << 3) | (limit_in_pages << 4) |
6262         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6263     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6264     base_addr = (entry_1 >> 16) |
6265         (entry_2 & 0xff000000) |
6266         ((entry_2 & 0xff) << 16);
6267     target_ldt_info->base_addr = tswapal(base_addr);
6268     target_ldt_info->limit = tswap32(limit);
6269     target_ldt_info->flags = tswap32(flags);
6270     unlock_user_struct(target_ldt_info, ptr, 1);
6271     return 0;
6272 }
6273 
6274 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6275 {
6276     return -TARGET_ENOSYS;
6277 }
6278 #else
6279 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6280 {
6281     abi_long ret = 0;
6282     abi_ulong val;
6283     int idx;
6284 
6285     switch(code) {
6286     case TARGET_ARCH_SET_GS:
6287     case TARGET_ARCH_SET_FS:
6288         if (code == TARGET_ARCH_SET_GS)
6289             idx = R_GS;
6290         else
6291             idx = R_FS;
6292         cpu_x86_load_seg(env, idx, 0);
6293         env->segs[idx].base = addr;
6294         break;
6295     case TARGET_ARCH_GET_GS:
6296     case TARGET_ARCH_GET_FS:
6297         if (code == TARGET_ARCH_GET_GS)
6298             idx = R_GS;
6299         else
6300             idx = R_FS;
6301         val = env->segs[idx].base;
6302         if (put_user(val, addr, abi_ulong))
6303             ret = -TARGET_EFAULT;
6304         break;
6305     default:
6306         ret = -TARGET_EINVAL;
6307         break;
6308     }
6309     return ret;
6310 }
6311 #endif /* defined(TARGET_ABI32) */
6312 #endif /* defined(TARGET_I386) */
6313 
6314 /*
6315  * These constants are generic.  Supply any that are missing from the host.
6316  */
6317 #ifndef PR_SET_NAME
6318 # define PR_SET_NAME    15
6319 # define PR_GET_NAME    16
6320 #endif
6321 #ifndef PR_SET_FP_MODE
6322 # define PR_SET_FP_MODE 45
6323 # define PR_GET_FP_MODE 46
6324 # define PR_FP_MODE_FR   (1 << 0)
6325 # define PR_FP_MODE_FRE  (1 << 1)
6326 #endif
6327 #ifndef PR_SVE_SET_VL
6328 # define PR_SVE_SET_VL  50
6329 # define PR_SVE_GET_VL  51
6330 # define PR_SVE_VL_LEN_MASK  0xffff
6331 # define PR_SVE_VL_INHERIT   (1 << 17)
6332 #endif
6333 #ifndef PR_PAC_RESET_KEYS
6334 # define PR_PAC_RESET_KEYS  54
6335 # define PR_PAC_APIAKEY   (1 << 0)
6336 # define PR_PAC_APIBKEY   (1 << 1)
6337 # define PR_PAC_APDAKEY   (1 << 2)
6338 # define PR_PAC_APDBKEY   (1 << 3)
6339 # define PR_PAC_APGAKEY   (1 << 4)
6340 #endif
6341 #ifndef PR_SET_TAGGED_ADDR_CTRL
6342 # define PR_SET_TAGGED_ADDR_CTRL 55
6343 # define PR_GET_TAGGED_ADDR_CTRL 56
6344 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6345 #endif
6346 #ifndef PR_MTE_TCF_SHIFT
6347 # define PR_MTE_TCF_SHIFT       1
6348 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6349 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6350 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6351 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6352 # define PR_MTE_TAG_SHIFT       3
6353 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6354 #endif
6355 #ifndef PR_SET_IO_FLUSHER
6356 # define PR_SET_IO_FLUSHER 57
6357 # define PR_GET_IO_FLUSHER 58
6358 #endif
6359 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6360 # define PR_SET_SYSCALL_USER_DISPATCH 59
6361 #endif
6362 #ifndef PR_SME_SET_VL
6363 # define PR_SME_SET_VL  63
6364 # define PR_SME_GET_VL  64
6365 # define PR_SME_VL_LEN_MASK  0xffff
6366 # define PR_SME_VL_INHERIT   (1 << 17)
6367 #endif
6368 
6369 #include "target_prctl.h"
6370 
6371 static abi_long do_prctl_inval0(CPUArchState *env)
6372 {
6373     return -TARGET_EINVAL;
6374 }
6375 
6376 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6377 {
6378     return -TARGET_EINVAL;
6379 }
6380 
6381 #ifndef do_prctl_get_fp_mode
6382 #define do_prctl_get_fp_mode do_prctl_inval0
6383 #endif
6384 #ifndef do_prctl_set_fp_mode
6385 #define do_prctl_set_fp_mode do_prctl_inval1
6386 #endif
6387 #ifndef do_prctl_sve_get_vl
6388 #define do_prctl_sve_get_vl do_prctl_inval0
6389 #endif
6390 #ifndef do_prctl_sve_set_vl
6391 #define do_prctl_sve_set_vl do_prctl_inval1
6392 #endif
6393 #ifndef do_prctl_reset_keys
6394 #define do_prctl_reset_keys do_prctl_inval1
6395 #endif
6396 #ifndef do_prctl_set_tagged_addr_ctrl
6397 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6398 #endif
6399 #ifndef do_prctl_get_tagged_addr_ctrl
6400 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6401 #endif
6402 #ifndef do_prctl_get_unalign
6403 #define do_prctl_get_unalign do_prctl_inval1
6404 #endif
6405 #ifndef do_prctl_set_unalign
6406 #define do_prctl_set_unalign do_prctl_inval1
6407 #endif
6408 #ifndef do_prctl_sme_get_vl
6409 #define do_prctl_sme_get_vl do_prctl_inval0
6410 #endif
6411 #ifndef do_prctl_sme_set_vl
6412 #define do_prctl_sme_set_vl do_prctl_inval1
6413 #endif
6414 
6415 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6416                          abi_long arg3, abi_long arg4, abi_long arg5)
6417 {
6418     abi_long ret;
6419 
6420     switch (option) {
6421     case PR_GET_PDEATHSIG:
6422         {
6423             int deathsig;
6424             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6425                                   arg3, arg4, arg5));
6426             if (!is_error(ret) &&
6427                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6428                 return -TARGET_EFAULT;
6429             }
6430             return ret;
6431         }
6432     case PR_SET_PDEATHSIG:
6433         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6434                                arg3, arg4, arg5));
6435     case PR_GET_NAME:
6436         {
6437             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6438             if (!name) {
6439                 return -TARGET_EFAULT;
6440             }
6441             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6442                                   arg3, arg4, arg5));
6443             unlock_user(name, arg2, 16);
6444             return ret;
6445         }
6446     case PR_SET_NAME:
6447         {
6448             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6449             if (!name) {
6450                 return -TARGET_EFAULT;
6451             }
6452             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6453                                   arg3, arg4, arg5));
6454             unlock_user(name, arg2, 0);
6455             return ret;
6456         }
6457     case PR_GET_FP_MODE:
6458         return do_prctl_get_fp_mode(env);
6459     case PR_SET_FP_MODE:
6460         return do_prctl_set_fp_mode(env, arg2);
6461     case PR_SVE_GET_VL:
6462         return do_prctl_sve_get_vl(env);
6463     case PR_SVE_SET_VL:
6464         return do_prctl_sve_set_vl(env, arg2);
6465     case PR_SME_GET_VL:
6466         return do_prctl_sme_get_vl(env);
6467     case PR_SME_SET_VL:
6468         return do_prctl_sme_set_vl(env, arg2);
6469     case PR_PAC_RESET_KEYS:
6470         if (arg3 || arg4 || arg5) {
6471             return -TARGET_EINVAL;
6472         }
6473         return do_prctl_reset_keys(env, arg2);
6474     case PR_SET_TAGGED_ADDR_CTRL:
6475         if (arg3 || arg4 || arg5) {
6476             return -TARGET_EINVAL;
6477         }
6478         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6479     case PR_GET_TAGGED_ADDR_CTRL:
6480         if (arg2 || arg3 || arg4 || arg5) {
6481             return -TARGET_EINVAL;
6482         }
6483         return do_prctl_get_tagged_addr_ctrl(env);
6484 
6485     case PR_GET_UNALIGN:
6486         return do_prctl_get_unalign(env, arg2);
6487     case PR_SET_UNALIGN:
6488         return do_prctl_set_unalign(env, arg2);
6489 
6490     case PR_CAP_AMBIENT:
6491     case PR_CAPBSET_READ:
6492     case PR_CAPBSET_DROP:
6493     case PR_GET_DUMPABLE:
6494     case PR_SET_DUMPABLE:
6495     case PR_GET_KEEPCAPS:
6496     case PR_SET_KEEPCAPS:
6497     case PR_GET_SECUREBITS:
6498     case PR_SET_SECUREBITS:
6499     case PR_GET_TIMING:
6500     case PR_SET_TIMING:
6501     case PR_GET_TIMERSLACK:
6502     case PR_SET_TIMERSLACK:
6503     case PR_MCE_KILL:
6504     case PR_MCE_KILL_GET:
6505     case PR_GET_NO_NEW_PRIVS:
6506     case PR_SET_NO_NEW_PRIVS:
6507     case PR_GET_IO_FLUSHER:
6508     case PR_SET_IO_FLUSHER:
6509         /* Some prctl options have no pointer arguments and we can pass them on. */
6510         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6511 
6512     case PR_GET_CHILD_SUBREAPER:
6513     case PR_SET_CHILD_SUBREAPER:
6514     case PR_GET_SPECULATION_CTRL:
6515     case PR_SET_SPECULATION_CTRL:
6516     case PR_GET_TID_ADDRESS:
6517         /* TODO */
6518         return -TARGET_EINVAL;
6519 
6520     case PR_GET_FPEXC:
6521     case PR_SET_FPEXC:
6522         /* Was used for SPE on PowerPC. */
6523         return -TARGET_EINVAL;
6524 
6525     case PR_GET_ENDIAN:
6526     case PR_SET_ENDIAN:
6527     case PR_GET_FPEMU:
6528     case PR_SET_FPEMU:
6529     case PR_SET_MM:
6530     case PR_GET_SECCOMP:
6531     case PR_SET_SECCOMP:
6532     case PR_SET_SYSCALL_USER_DISPATCH:
6533     case PR_GET_THP_DISABLE:
6534     case PR_SET_THP_DISABLE:
6535     case PR_GET_TSC:
6536     case PR_SET_TSC:
6537         /* Disabled, to prevent the target from disabling functionality we need. */
6538         return -TARGET_EINVAL;
6539 
6540     default:
6541         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6542                       option);
6543         return -TARGET_EINVAL;
6544     }
6545 }
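/*
 * Dispatch summary for do_prctl(): options that take guest pointers
 * (PR_GET_PDEATHSIG, PR_GET_NAME, PR_SET_NAME) are translated explicitly;
 * architecture-specific options go through the do_prctl_* hooks supplied
 * by target_prctl.h (defaulting to -TARGET_EINVAL above); a whitelist of
 * pointer-free options is forwarded to the host prctl() unchanged;
 * everything else fails with -TARGET_EINVAL.  For example, a guest
 * prctl(PR_SET_NAME, buf) locks 16 bytes of guest memory (the size of the
 * kernel's comm field) and passes the host pointer straight down.
 */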
6546 
6547 #define NEW_STACK_SIZE 0x40000
6548 
6549 
6550 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6551 typedef struct {
6552     CPUArchState *env;
6553     pthread_mutex_t mutex;
6554     pthread_cond_t cond;
6555     pthread_t thread;
6556     uint32_t tid;
6557     abi_ulong child_tidptr;
6558     abi_ulong parent_tidptr;
6559     sigset_t sigmask;
6560 } new_thread_info;
6561 
6562 static void *clone_func(void *arg)
6563 {
6564     new_thread_info *info = arg;
6565     CPUArchState *env;
6566     CPUState *cpu;
6567     TaskState *ts;
6568 
6569     rcu_register_thread();
6570     tcg_register_thread();
6571     env = info->env;
6572     cpu = env_cpu(env);
6573     thread_cpu = cpu;
6574     ts = (TaskState *)cpu->opaque;
6575     info->tid = sys_gettid();
6576     task_settid(ts);
6577     if (info->child_tidptr)
6578         put_user_u32(info->tid, info->child_tidptr);
6579     if (info->parent_tidptr)
6580         put_user_u32(info->tid, info->parent_tidptr);
6581     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6582     /* Enable signals.  */
6583     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6584     /* Signal to the parent that we're ready.  */
6585     pthread_mutex_lock(&info->mutex);
6586     pthread_cond_broadcast(&info->cond);
6587     pthread_mutex_unlock(&info->mutex);
6588     /* Wait until the parent has finished initializing the tls state.  */
6589     pthread_mutex_lock(&clone_lock);
6590     pthread_mutex_unlock(&clone_lock);
6591     cpu_loop(env);
6592     /* never exits */
6593     return NULL;
6594 }
6595 
6596 /* do_fork() must return host values and target errnos (unlike most
6597    do_*() functions). */
6598 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6599                    abi_ulong parent_tidptr, target_ulong newtls,
6600                    abi_ulong child_tidptr)
6601 {
6602     CPUState *cpu = env_cpu(env);
6603     int ret;
6604     TaskState *ts;
6605     CPUState *new_cpu;
6606     CPUArchState *new_env;
6607     sigset_t sigmask;
6608 
6609     flags &= ~CLONE_IGNORED_FLAGS;
6610 
6611     /* Emulate vfork() with fork() */
6612     if (flags & CLONE_VFORK)
6613         flags &= ~(CLONE_VFORK | CLONE_VM);
6614 
6615     if (flags & CLONE_VM) {
6616         TaskState *parent_ts = (TaskState *)cpu->opaque;
6617         new_thread_info info;
6618         pthread_attr_t attr;
6619 
6620         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6621             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6622             return -TARGET_EINVAL;
6623         }
6624 
6625         ts = g_new0(TaskState, 1);
6626         init_task_state(ts);
6627 
6628         /* Grab a mutex so that thread setup appears atomic.  */
6629         pthread_mutex_lock(&clone_lock);
6630 
6631         /*
6632          * If this is our first additional thread, we need to ensure we
6633          * generate code for parallel execution and flush old translations.
6634          * Do this now so that the copy gets CF_PARALLEL too.
6635          */
6636         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6637             cpu->tcg_cflags |= CF_PARALLEL;
6638             tb_flush(cpu);
6639         }
6640 
6641         /* we create a new CPU instance. */
6642         new_env = cpu_copy(env);
6643         /* Init regs that differ from the parent.  */
6644         cpu_clone_regs_child(new_env, newsp, flags);
6645         cpu_clone_regs_parent(env, flags);
6646         new_cpu = env_cpu(new_env);
6647         new_cpu->opaque = ts;
6648         ts->bprm = parent_ts->bprm;
6649         ts->info = parent_ts->info;
6650         ts->signal_mask = parent_ts->signal_mask;
6651 
6652         if (flags & CLONE_CHILD_CLEARTID) {
6653             ts->child_tidptr = child_tidptr;
6654         }
6655 
6656         if (flags & CLONE_SETTLS) {
6657             cpu_set_tls (new_env, newtls);
6658         }
6659 
6660         memset(&info, 0, sizeof(info));
6661         pthread_mutex_init(&info.mutex, NULL);
6662         pthread_mutex_lock(&info.mutex);
6663         pthread_cond_init(&info.cond, NULL);
6664         info.env = new_env;
6665         if (flags & CLONE_CHILD_SETTID) {
6666             info.child_tidptr = child_tidptr;
6667         }
6668         if (flags & CLONE_PARENT_SETTID) {
6669             info.parent_tidptr = parent_tidptr;
6670         }
6671 
6672         ret = pthread_attr_init(&attr);
6673         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6674         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6675         /* It is not safe to deliver signals until the child has finished
6676            initializing, so temporarily block all signals.  */
6677         sigfillset(&sigmask);
6678         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6679         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6680 
6681         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6682         /* TODO: Free new CPU state if thread creation failed.  */
6683 
6684         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6685         pthread_attr_destroy(&attr);
6686         if (ret == 0) {
6687             /* Wait for the child to initialize.  */
6688             pthread_cond_wait(&info.cond, &info.mutex);
6689             ret = info.tid;
6690         } else {
6691             ret = -1;
6692         }
6693         pthread_mutex_unlock(&info.mutex);
6694         pthread_cond_destroy(&info.cond);
6695         pthread_mutex_destroy(&info.mutex);
6696         pthread_mutex_unlock(&clone_lock);
6697     } else {
6698         /* Without CLONE_VM, we treat it as a fork. */
6699         if (flags & CLONE_INVALID_FORK_FLAGS) {
6700             return -TARGET_EINVAL;
6701         }
6702 
6703         /* We can't support custom termination signals */
6704         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6705             return -TARGET_EINVAL;
6706         }
6707 
6708         if (block_signals()) {
6709             return -QEMU_ERESTARTSYS;
6710         }
6711 
6712         fork_start();
6713         ret = fork();
6714         if (ret == 0) {
6715             /* Child Process.  */
6716             cpu_clone_regs_child(env, newsp, flags);
6717             fork_end(1);
6718             /* There is a race condition here.  The parent process could
6719                theoretically read the TID in the child process before the child
6720                tid is set.  This would require using either ptrace
6721                (not implemented) or having *_tidptr point at a shared memory
6722                mapping.  We can't repeat the spinlock hack used above because
6723                the child process gets its own copy of the lock.  */
6724             if (flags & CLONE_CHILD_SETTID)
6725                 put_user_u32(sys_gettid(), child_tidptr);
6726             if (flags & CLONE_PARENT_SETTID)
6727                 put_user_u32(sys_gettid(), parent_tidptr);
6728             ts = (TaskState *)cpu->opaque;
6729             if (flags & CLONE_SETTLS)
6730                 cpu_set_tls (env, newtls);
6731             if (flags & CLONE_CHILD_CLEARTID)
6732                 ts->child_tidptr = child_tidptr;
6733         } else {
6734             cpu_clone_regs_parent(env, flags);
6735             fork_end(0);
6736         }
6737     }
6738     return ret;
6739 }
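/*
 * Summary of do_fork(): with CLONE_VM the new task is a host pthread in
 * the same process, and the clone_lock/info.mutex handshake makes thread
 * setup appear atomic to the child; without CLONE_VM (including emulated
 * vfork) the request becomes a host fork(), so only SIGCHLD termination
 * and the standard fork flags are supported.
 */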
6740 
6741 /* Warning: does not handle Linux-specific flags... */
6742 static int target_to_host_fcntl_cmd(int cmd)
6743 {
6744     int ret;
6745 
6746     switch(cmd) {
6747     case TARGET_F_DUPFD:
6748     case TARGET_F_GETFD:
6749     case TARGET_F_SETFD:
6750     case TARGET_F_GETFL:
6751     case TARGET_F_SETFL:
6752     case TARGET_F_OFD_GETLK:
6753     case TARGET_F_OFD_SETLK:
6754     case TARGET_F_OFD_SETLKW:
6755         ret = cmd;
6756         break;
6757     case TARGET_F_GETLK:
6758         ret = F_GETLK64;
6759         break;
6760     case TARGET_F_SETLK:
6761         ret = F_SETLK64;
6762         break;
6763     case TARGET_F_SETLKW:
6764         ret = F_SETLKW64;
6765         break;
6766     case TARGET_F_GETOWN:
6767         ret = F_GETOWN;
6768         break;
6769     case TARGET_F_SETOWN:
6770         ret = F_SETOWN;
6771         break;
6772     case TARGET_F_GETSIG:
6773         ret = F_GETSIG;
6774         break;
6775     case TARGET_F_SETSIG:
6776         ret = F_SETSIG;
6777         break;
6778 #if TARGET_ABI_BITS == 32
6779     case TARGET_F_GETLK64:
6780         ret = F_GETLK64;
6781         break;
6782     case TARGET_F_SETLK64:
6783         ret = F_SETLK64;
6784         break;
6785     case TARGET_F_SETLKW64:
6786         ret = F_SETLKW64;
6787         break;
6788 #endif
6789     case TARGET_F_SETLEASE:
6790         ret = F_SETLEASE;
6791         break;
6792     case TARGET_F_GETLEASE:
6793         ret = F_GETLEASE;
6794         break;
6795 #ifdef F_DUPFD_CLOEXEC
6796     case TARGET_F_DUPFD_CLOEXEC:
6797         ret = F_DUPFD_CLOEXEC;
6798         break;
6799 #endif
6800     case TARGET_F_NOTIFY:
6801         ret = F_NOTIFY;
6802         break;
6803 #ifdef F_GETOWN_EX
6804     case TARGET_F_GETOWN_EX:
6805         ret = F_GETOWN_EX;
6806         break;
6807 #endif
6808 #ifdef F_SETOWN_EX
6809     case TARGET_F_SETOWN_EX:
6810         ret = F_SETOWN_EX;
6811         break;
6812 #endif
6813 #ifdef F_SETPIPE_SZ
6814     case TARGET_F_SETPIPE_SZ:
6815         ret = F_SETPIPE_SZ;
6816         break;
6817     case TARGET_F_GETPIPE_SZ:
6818         ret = F_GETPIPE_SZ;
6819         break;
6820 #endif
6821 #ifdef F_ADD_SEALS
6822     case TARGET_F_ADD_SEALS:
6823         ret = F_ADD_SEALS;
6824         break;
6825     case TARGET_F_GET_SEALS:
6826         ret = F_GET_SEALS;
6827         break;
6828 #endif
6829     default:
6830         ret = -TARGET_EINVAL;
6831         break;
6832     }
6833 
6834 #if defined(__powerpc64__)
6835     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6836      * which are not supported by the kernel. The glibc fcntl wrapper
6837      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6838      * the syscall directly, adjust them to what the kernel supports.
6839      */
6840     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6841         ret -= F_GETLK64 - 5;
6842     }
6843 #endif
6844 
6845     return ret;
6846 }
6847 
6848 #define FLOCK_TRANSTBL \
6849     switch (type) { \
6850     TRANSTBL_CONVERT(F_RDLCK); \
6851     TRANSTBL_CONVERT(F_WRLCK); \
6852     TRANSTBL_CONVERT(F_UNLCK); \
6853     }
6854 
6855 static int target_to_host_flock(int type)
6856 {
6857 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6858     FLOCK_TRANSTBL
6859 #undef  TRANSTBL_CONVERT
6860     return -TARGET_EINVAL;
6861 }
6862 
6863 static int host_to_target_flock(int type)
6864 {
6865 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6866     FLOCK_TRANSTBL
6867 #undef  TRANSTBL_CONVERT
6868     /* if we don't know how to convert the value coming
6869     /* If we don't know how to convert the value coming from the host,
6870      * copy it to the target field as-is.
6871     return type;
6872 }
6873 
6874 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6875                                             abi_ulong target_flock_addr)
6876 {
6877     struct target_flock *target_fl;
6878     int l_type;
6879 
6880     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6881         return -TARGET_EFAULT;
6882     }
6883 
6884     __get_user(l_type, &target_fl->l_type);
6885     l_type = target_to_host_flock(l_type);
6886     if (l_type < 0) {
6887         return l_type;
6888     }
6889     fl->l_type = l_type;
6890     __get_user(fl->l_whence, &target_fl->l_whence);
6891     __get_user(fl->l_start, &target_fl->l_start);
6892     __get_user(fl->l_len, &target_fl->l_len);
6893     __get_user(fl->l_pid, &target_fl->l_pid);
6894     unlock_user_struct(target_fl, target_flock_addr, 0);
6895     return 0;
6896 }
6897 
6898 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6899                                           const struct flock64 *fl)
6900 {
6901     struct target_flock *target_fl;
6902     short l_type;
6903 
6904     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6905         return -TARGET_EFAULT;
6906     }
6907 
6908     l_type = host_to_target_flock(fl->l_type);
6909     __put_user(l_type, &target_fl->l_type);
6910     __put_user(fl->l_whence, &target_fl->l_whence);
6911     __put_user(fl->l_start, &target_fl->l_start);
6912     __put_user(fl->l_len, &target_fl->l_len);
6913     __put_user(fl->l_pid, &target_fl->l_pid);
6914     unlock_user_struct(target_fl, target_flock_addr, 1);
6915     return 0;
6916 }
6917 
6918 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6919 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6920 
6921 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6922 struct target_oabi_flock64 {
6923     abi_short l_type;
6924     abi_short l_whence;
6925     abi_llong l_start;
6926     abi_llong l_len;
6927     abi_int   l_pid;
6928 } QEMU_PACKED;
6929 
6930 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6931                                                    abi_ulong target_flock_addr)
6932 {
6933     struct target_oabi_flock64 *target_fl;
6934     int l_type;
6935 
6936     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6937         return -TARGET_EFAULT;
6938     }
6939 
6940     __get_user(l_type, &target_fl->l_type);
6941     l_type = target_to_host_flock(l_type);
6942     if (l_type < 0) {
6943         return l_type;
6944     }
6945     fl->l_type = l_type;
6946     __get_user(fl->l_whence, &target_fl->l_whence);
6947     __get_user(fl->l_start, &target_fl->l_start);
6948     __get_user(fl->l_len, &target_fl->l_len);
6949     __get_user(fl->l_pid, &target_fl->l_pid);
6950     unlock_user_struct(target_fl, target_flock_addr, 0);
6951     return 0;
6952 }
6953 
6954 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6955                                                  const struct flock64 *fl)
6956 {
6957     struct target_oabi_flock64 *target_fl;
6958     short l_type;
6959 
6960     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6961         return -TARGET_EFAULT;
6962     }
6963 
6964     l_type = host_to_target_flock(fl->l_type);
6965     __put_user(l_type, &target_fl->l_type);
6966     __put_user(fl->l_whence, &target_fl->l_whence);
6967     __put_user(fl->l_start, &target_fl->l_start);
6968     __put_user(fl->l_len, &target_fl->l_len);
6969     __put_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 1);
6971     return 0;
6972 }
6973 #endif
6974 
6975 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6976                                               abi_ulong target_flock_addr)
6977 {
6978     struct target_flock64 *target_fl;
6979     int l_type;
6980 
6981     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6982         return -TARGET_EFAULT;
6983     }
6984 
6985     __get_user(l_type, &target_fl->l_type);
6986     l_type = target_to_host_flock(l_type);
6987     if (l_type < 0) {
6988         return l_type;
6989     }
6990     fl->l_type = l_type;
6991     __get_user(fl->l_whence, &target_fl->l_whence);
6992     __get_user(fl->l_start, &target_fl->l_start);
6993     __get_user(fl->l_len, &target_fl->l_len);
6994     __get_user(fl->l_pid, &target_fl->l_pid);
6995     unlock_user_struct(target_fl, target_flock_addr, 0);
6996     return 0;
6997 }
6998 
6999 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7000                                             const struct flock64 *fl)
7001 {
7002     struct target_flock64 *target_fl;
7003     short l_type;
7004 
7005     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7006         return -TARGET_EFAULT;
7007     }
7008 
7009     l_type = host_to_target_flock(fl->l_type);
7010     __put_user(l_type, &target_fl->l_type);
7011     __put_user(fl->l_whence, &target_fl->l_whence);
7012     __put_user(fl->l_start, &target_fl->l_start);
7013     __put_user(fl->l_len, &target_fl->l_len);
7014     __put_user(fl->l_pid, &target_fl->l_pid);
7015     unlock_user_struct(target_fl, target_flock_addr, 1);
7016     return 0;
7017 }
7018 
7019 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7020 {
7021     struct flock64 fl64;
7022 #ifdef F_GETOWN_EX
7023     struct f_owner_ex fox;
7024     struct target_f_owner_ex *target_fox;
7025 #endif
7026     abi_long ret;
7027     int host_cmd = target_to_host_fcntl_cmd(cmd);
7028 
7029     if (host_cmd == -TARGET_EINVAL)
7030         return host_cmd;
7031 
7032     switch(cmd) {
7033     case TARGET_F_GETLK:
7034         ret = copy_from_user_flock(&fl64, arg);
7035         if (ret) {
7036             return ret;
7037         }
7038         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7039         if (ret == 0) {
7040             ret = copy_to_user_flock(arg, &fl64);
7041         }
7042         break;
7043 
7044     case TARGET_F_SETLK:
7045     case TARGET_F_SETLKW:
7046         ret = copy_from_user_flock(&fl64, arg);
7047         if (ret) {
7048             return ret;
7049         }
7050         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7051         break;
7052 
7053     case TARGET_F_GETLK64:
7054     case TARGET_F_OFD_GETLK:
7055         ret = copy_from_user_flock64(&fl64, arg);
7056         if (ret) {
7057             return ret;
7058         }
7059         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7060         if (ret == 0) {
7061             ret = copy_to_user_flock64(arg, &fl64);
7062         }
7063         break;
7064     case TARGET_F_SETLK64:
7065     case TARGET_F_SETLKW64:
7066     case TARGET_F_OFD_SETLK:
7067     case TARGET_F_OFD_SETLKW:
7068         ret = copy_from_user_flock64(&fl64, arg);
7069         if (ret) {
7070             return ret;
7071         }
7072         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7073         break;
7074 
7075     case TARGET_F_GETFL:
7076         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7077         if (ret >= 0) {
7078             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7079         }
7080         break;
7081 
7082     case TARGET_F_SETFL:
7083         ret = get_errno(safe_fcntl(fd, host_cmd,
7084                                    target_to_host_bitmask(arg,
7085                                                           fcntl_flags_tbl)));
7086         break;
7087 
7088 #ifdef F_GETOWN_EX
7089     case TARGET_F_GETOWN_EX:
7090         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7091         if (ret >= 0) {
7092             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7093                 return -TARGET_EFAULT;
7094             target_fox->type = tswap32(fox.type);
7095             target_fox->pid = tswap32(fox.pid);
7096             unlock_user_struct(target_fox, arg, 1);
7097         }
7098         break;
7099 #endif
7100 
7101 #ifdef F_SETOWN_EX
7102     case TARGET_F_SETOWN_EX:
7103         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7104             return -TARGET_EFAULT;
7105         fox.type = tswap32(target_fox->type);
7106         fox.pid = tswap32(target_fox->pid);
7107         unlock_user_struct(target_fox, arg, 0);
7108         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7109         break;
7110 #endif
7111 
7112     case TARGET_F_SETSIG:
7113         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7114         break;
7115 
7116     case TARGET_F_GETSIG:
7117         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7118         break;
7119 
7120     case TARGET_F_SETOWN:
7121     case TARGET_F_GETOWN:
7122     case TARGET_F_SETLEASE:
7123     case TARGET_F_GETLEASE:
7124     case TARGET_F_SETPIPE_SZ:
7125     case TARGET_F_GETPIPE_SZ:
7126     case TARGET_F_ADD_SEALS:
7127     case TARGET_F_GET_SEALS:
7128         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7129         break;
7130 
7131     default:
7132         ret = get_errno(safe_fcntl(fd, cmd, arg));
7133         break;
7134     }
7135     return ret;
7136 }
7137 
7138 #ifdef USE_UID16
7139 
7140 static inline int high2lowuid(int uid)
7141 {
7142     if (uid > 65535)
7143         return 65534;
7144     else
7145         return uid;
7146 }
7147 
7148 static inline int high2lowgid(int gid)
7149 {
7150     if (gid > 65535)
7151         return 65534;
7152     else
7153         return gid;
7154 }
7155 
7156 static inline int low2highuid(int uid)
7157 {
7158     if ((int16_t)uid == -1)
7159         return -1;
7160     else
7161         return uid;
7162 }
7163 
7164 static inline int low2highgid(int gid)
7165 {
7166     if ((int16_t)gid == -1)
7167         return -1;
7168     else
7169         return gid;
7170 }
7171 static inline int tswapid(int id)
7172 {
7173     return tswap16(id);
7174 }
7175 
7176 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7177 
7178 #else /* !USE_UID16 */
7179 static inline int high2lowuid(int uid)
7180 {
7181     return uid;
7182 }
7183 static inline int high2lowgid(int gid)
7184 {
7185     return gid;
7186 }
7187 static inline int low2highuid(int uid)
7188 {
7189     return uid;
7190 }
7191 static inline int low2highgid(int gid)
7192 {
7193     return gid;
7194 }
7195 static inline int tswapid(int id)
7196 {
7197     return tswap32(id);
7198 }
7199 
7200 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7201 
7202 #endif /* USE_UID16 */
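/*
 * With USE_UID16 the target ABI carries 16-bit uids/gids: values above
 * 65535 are clamped to 65534 (the traditional overflow id) on the way to
 * the guest, and a 16-bit -1 from the guest is sign-extended so the
 * "leave unchanged" convention still works on the host side.
 */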
7203 
7204 /* We must do direct syscalls for setting UID/GID, because we want to
7205  * implement the Linux system call semantics of "change only for this thread",
7206  * not the libc/POSIX semantics of "change for all threads in process".
7207  * (See http://ewontfix.com/17/ for more details.)
7208  * We use the 32-bit version of the syscalls if present; if it is not
7209  * then either the host architecture supports 32-bit UIDs natively with
7210  * the standard syscall, or the 16-bit UID is the best we can do.
7211  */
7212 #ifdef __NR_setuid32
7213 #define __NR_sys_setuid __NR_setuid32
7214 #else
7215 #define __NR_sys_setuid __NR_setuid
7216 #endif
7217 #ifdef __NR_setgid32
7218 #define __NR_sys_setgid __NR_setgid32
7219 #else
7220 #define __NR_sys_setgid __NR_setgid
7221 #endif
7222 #ifdef __NR_setresuid32
7223 #define __NR_sys_setresuid __NR_setresuid32
7224 #else
7225 #define __NR_sys_setresuid __NR_setresuid
7226 #endif
7227 #ifdef __NR_setresgid32
7228 #define __NR_sys_setresgid __NR_setresgid32
7229 #else
7230 #define __NR_sys_setresgid __NR_setresgid
7231 #endif
7232 
7233 _syscall1(int, sys_setuid, uid_t, uid)
7234 _syscall1(int, sys_setgid, gid_t, gid)
7235 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7236 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7237 
7238 void syscall_init(void)
7239 {
7240     IOCTLEntry *ie;
7241     const argtype *arg_type;
7242     int size;
7243 
7244     thunk_init(STRUCT_MAX);
7245 
7246 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7247 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7248 #include "syscall_types.h"
7249 #undef STRUCT
7250 #undef STRUCT_SPECIAL
7251 
7252     /* Patch the ioctl size if necessary. We rely on the fact that
7253        no ioctl has all the bits set to '1' in its size field. */
7254     ie = ioctl_entries;
7255     while (ie->target_cmd != 0) {
7256         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7257             TARGET_IOC_SIZEMASK) {
7258             arg_type = ie->arg_type;
7259             if (arg_type[0] != TYPE_PTR) {
7260                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7261                         ie->target_cmd);
7262                 exit(1);
7263             }
7264             arg_type++;
7265             size = thunk_type_size(arg_type, 0);
7266             ie->target_cmd = (ie->target_cmd &
7267                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7268                 (size << TARGET_IOC_SIZESHIFT);
7269         }
7270 
7271         /* automatic consistency check if same arch */
7272 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7273     (defined(__x86_64__) && defined(TARGET_X86_64))
7274         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7275             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7276                     ie->name, ie->target_cmd, ie->host_cmd);
7277         }
7278 #endif
7279         ie++;
7280     }
7281 }
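/*
 * Size-patching example: an IOCTLEntry whose target_cmd encodes the
 * all-ones TARGET_IOC_SIZEMASK placeholder gets its real argument size
 * computed from the thunk type description (thunk_type_size() on the
 * pointed-to struct) and spliced back into the command word, so the value
 * matched against the guest's ioctl request carries the correct size bits.
 */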
7282 
7283 #ifdef TARGET_NR_truncate64
7284 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7285                                          abi_long arg2,
7286                                          abi_long arg3,
7287                                          abi_long arg4)
7288 {
7289     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7290         arg2 = arg3;
7291         arg3 = arg4;
7292     }
7293     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7294 }
7295 #endif
7296 
7297 #ifdef TARGET_NR_ftruncate64
7298 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7299                                           abi_long arg2,
7300                                           abi_long arg3,
7301                                           abi_long arg4)
7302 {
7303     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7304         arg2 = arg3;
7305         arg3 = arg4;
7306     }
7307     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7308 }
7309 #endif
7310 
7311 #if defined(TARGET_NR_timer_settime) || \
7312     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7313 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7314                                                  abi_ulong target_addr)
7315 {
7316     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7317                                 offsetof(struct target_itimerspec,
7318                                          it_interval)) ||
7319         target_to_host_timespec(&host_its->it_value, target_addr +
7320                                 offsetof(struct target_itimerspec,
7321                                          it_value))) {
7322         return -TARGET_EFAULT;
7323     }
7324 
7325     return 0;
7326 }
7327 #endif
7328 
7329 #if defined(TARGET_NR_timer_settime64) || \
7330     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7331 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7332                                                    abi_ulong target_addr)
7333 {
7334     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7335                                   offsetof(struct target__kernel_itimerspec,
7336                                            it_interval)) ||
7337         target_to_host_timespec64(&host_its->it_value, target_addr +
7338                                   offsetof(struct target__kernel_itimerspec,
7339                                            it_value))) {
7340         return -TARGET_EFAULT;
7341     }
7342 
7343     return 0;
7344 }
7345 #endif
7346 
7347 #if ((defined(TARGET_NR_timerfd_gettime) || \
7348       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7349       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7350 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7351                                                  struct itimerspec *host_its)
7352 {
7353     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7354                                                        it_interval),
7355                                 &host_its->it_interval) ||
7356         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7357                                                        it_value),
7358                                 &host_its->it_value)) {
7359         return -TARGET_EFAULT;
7360     }
7361     return 0;
7362 }
7363 #endif
7364 
7365 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7366       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7367       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7368 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7369                                                    struct itimerspec *host_its)
7370 {
7371     if (host_to_target_timespec64(target_addr +
7372                                   offsetof(struct target__kernel_itimerspec,
7373                                            it_interval),
7374                                   &host_its->it_interval) ||
7375         host_to_target_timespec64(target_addr +
7376                                   offsetof(struct target__kernel_itimerspec,
7377                                            it_value),
7378                                   &host_its->it_value)) {
7379         return -TARGET_EFAULT;
7380     }
7381     return 0;
7382 }
7383 #endif
7384 
7385 #if defined(TARGET_NR_adjtimex) || \
7386     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7387 static inline abi_long target_to_host_timex(struct timex *host_tx,
7388                                             abi_long target_addr)
7389 {
7390     struct target_timex *target_tx;
7391 
7392     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7393         return -TARGET_EFAULT;
7394     }
7395 
7396     __get_user(host_tx->modes, &target_tx->modes);
7397     __get_user(host_tx->offset, &target_tx->offset);
7398     __get_user(host_tx->freq, &target_tx->freq);
7399     __get_user(host_tx->maxerror, &target_tx->maxerror);
7400     __get_user(host_tx->esterror, &target_tx->esterror);
7401     __get_user(host_tx->status, &target_tx->status);
7402     __get_user(host_tx->constant, &target_tx->constant);
7403     __get_user(host_tx->precision, &target_tx->precision);
7404     __get_user(host_tx->tolerance, &target_tx->tolerance);
7405     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7406     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7407     __get_user(host_tx->tick, &target_tx->tick);
7408     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7409     __get_user(host_tx->jitter, &target_tx->jitter);
7410     __get_user(host_tx->shift, &target_tx->shift);
7411     __get_user(host_tx->stabil, &target_tx->stabil);
7412     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7413     __get_user(host_tx->calcnt, &target_tx->calcnt);
7414     __get_user(host_tx->errcnt, &target_tx->errcnt);
7415     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7416     __get_user(host_tx->tai, &target_tx->tai);
7417 
7418     unlock_user_struct(target_tx, target_addr, 0);
7419     return 0;
7420 }
7421 
7422 static inline abi_long host_to_target_timex(abi_long target_addr,
7423                                             struct timex *host_tx)
7424 {
7425     struct target_timex *target_tx;
7426 
7427     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7428         return -TARGET_EFAULT;
7429     }
7430 
7431     __put_user(host_tx->modes, &target_tx->modes);
7432     __put_user(host_tx->offset, &target_tx->offset);
7433     __put_user(host_tx->freq, &target_tx->freq);
7434     __put_user(host_tx->maxerror, &target_tx->maxerror);
7435     __put_user(host_tx->esterror, &target_tx->esterror);
7436     __put_user(host_tx->status, &target_tx->status);
7437     __put_user(host_tx->constant, &target_tx->constant);
7438     __put_user(host_tx->precision, &target_tx->precision);
7439     __put_user(host_tx->tolerance, &target_tx->tolerance);
7440     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7441     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7442     __put_user(host_tx->tick, &target_tx->tick);
7443     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7444     __put_user(host_tx->jitter, &target_tx->jitter);
7445     __put_user(host_tx->shift, &target_tx->shift);
7446     __put_user(host_tx->stabil, &target_tx->stabil);
7447     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7448     __put_user(host_tx->calcnt, &target_tx->calcnt);
7449     __put_user(host_tx->errcnt, &target_tx->errcnt);
7450     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7451     __put_user(host_tx->tai, &target_tx->tai);
7452 
7453     unlock_user_struct(target_tx, target_addr, 1);
7454     return 0;
7455 }
7456 #endif
7457 
7458 
7459 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7460 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7461                                               abi_long target_addr)
7462 {
7463     struct target__kernel_timex *target_tx;
7464 
7465     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7466                                  offsetof(struct target__kernel_timex,
7467                                           time))) {
7468         return -TARGET_EFAULT;
7469     }
7470 
7471     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7472         return -TARGET_EFAULT;
7473     }
7474 
7475     __get_user(host_tx->modes, &target_tx->modes);
7476     __get_user(host_tx->offset, &target_tx->offset);
7477     __get_user(host_tx->freq, &target_tx->freq);
7478     __get_user(host_tx->maxerror, &target_tx->maxerror);
7479     __get_user(host_tx->esterror, &target_tx->esterror);
7480     __get_user(host_tx->status, &target_tx->status);
7481     __get_user(host_tx->constant, &target_tx->constant);
7482     __get_user(host_tx->precision, &target_tx->precision);
7483     __get_user(host_tx->tolerance, &target_tx->tolerance);
7484     __get_user(host_tx->tick, &target_tx->tick);
7485     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7486     __get_user(host_tx->jitter, &target_tx->jitter);
7487     __get_user(host_tx->shift, &target_tx->shift);
7488     __get_user(host_tx->stabil, &target_tx->stabil);
7489     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7490     __get_user(host_tx->calcnt, &target_tx->calcnt);
7491     __get_user(host_tx->errcnt, &target_tx->errcnt);
7492     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7493     __get_user(host_tx->tai, &target_tx->tai);
7494 
7495     unlock_user_struct(target_tx, target_addr, 0);
7496     return 0;
7497 }
7498 
7499 static inline abi_long host_to_target_timex64(abi_long target_addr,
7500                                               struct timex *host_tx)
7501 {
7502     struct target__kernel_timex *target_tx;
7503 
7504     if (copy_to_user_timeval64(target_addr +
7505                               offsetof(struct target__kernel_timex, time),
7506                               &host_tx->time)) {
7507         return -TARGET_EFAULT;
7508     }
7509 
7510     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7511         return -TARGET_EFAULT;
7512     }
7513 
7514     __put_user(host_tx->modes, &target_tx->modes);
7515     __put_user(host_tx->offset, &target_tx->offset);
7516     __put_user(host_tx->freq, &target_tx->freq);
7517     __put_user(host_tx->maxerror, &target_tx->maxerror);
7518     __put_user(host_tx->esterror, &target_tx->esterror);
7519     __put_user(host_tx->status, &target_tx->status);
7520     __put_user(host_tx->constant, &target_tx->constant);
7521     __put_user(host_tx->precision, &target_tx->precision);
7522     __put_user(host_tx->tolerance, &target_tx->tolerance);
7523     __put_user(host_tx->tick, &target_tx->tick);
7524     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7525     __put_user(host_tx->jitter, &target_tx->jitter);
7526     __put_user(host_tx->shift, &target_tx->shift);
7527     __put_user(host_tx->stabil, &target_tx->stabil);
7528     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7529     __put_user(host_tx->calcnt, &target_tx->calcnt);
7530     __put_user(host_tx->errcnt, &target_tx->errcnt);
7531     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7532     __put_user(host_tx->tai, &target_tx->tai);
7533 
7534     unlock_user_struct(target_tx, target_addr, 1);
7535     return 0;
7536 }
7537 #endif
7538 
7539 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7540 #define sigev_notify_thread_id _sigev_un._tid
7541 #endif
7542 
7543 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7544                                                abi_ulong target_addr)
7545 {
7546     struct target_sigevent *target_sevp;
7547 
7548     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7549         return -TARGET_EFAULT;
7550     }
7551 
7552     /* This union is awkward on 64-bit systems because it has a 32-bit
7553      * integer and a pointer in it; we follow the conversion approach
7554      * used for handling sigval types in signal.c so the guest should get
7555      * the correct value back even if we did a 64 bit byteswap and it's
7556      * using the 32-bit integer.
7557      */
7558     host_sevp->sigev_value.sival_ptr =
7559         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7560     host_sevp->sigev_signo =
7561         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7562     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7563     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7564 
7565     unlock_user_struct(target_sevp, target_addr, 1);
7566     return 0;
7567 }
7568 
7569 #if defined(TARGET_NR_mlockall)
7570 static inline int target_to_host_mlockall_arg(int arg)
7571 {
7572     int result = 0;
7573 
7574     if (arg & TARGET_MCL_CURRENT) {
7575         result |= MCL_CURRENT;
7576     }
7577     if (arg & TARGET_MCL_FUTURE) {
7578         result |= MCL_FUTURE;
7579     }
7580 #ifdef MCL_ONFAULT
7581     if (arg & TARGET_MCL_ONFAULT) {
7582         result |= MCL_ONFAULT;
7583     }
7584 #endif
7585 
7586     return result;
7587 }
7588 #endif
7589 
7590 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7591      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7592      defined(TARGET_NR_newfstatat))
7593 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7594                                              abi_ulong target_addr,
7595                                              struct stat *host_st)
7596 {
7597 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7598     if (cpu_env->eabi) {
7599         struct target_eabi_stat64 *target_st;
7600 
7601         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7602             return -TARGET_EFAULT;
7603         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7604         __put_user(host_st->st_dev, &target_st->st_dev);
7605         __put_user(host_st->st_ino, &target_st->st_ino);
7606 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7607         __put_user(host_st->st_ino, &target_st->__st_ino);
7608 #endif
7609         __put_user(host_st->st_mode, &target_st->st_mode);
7610         __put_user(host_st->st_nlink, &target_st->st_nlink);
7611         __put_user(host_st->st_uid, &target_st->st_uid);
7612         __put_user(host_st->st_gid, &target_st->st_gid);
7613         __put_user(host_st->st_rdev, &target_st->st_rdev);
7614         __put_user(host_st->st_size, &target_st->st_size);
7615         __put_user(host_st->st_blksize, &target_st->st_blksize);
7616         __put_user(host_st->st_blocks, &target_st->st_blocks);
7617         __put_user(host_st->st_atime, &target_st->target_st_atime);
7618         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7619         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7620 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7621         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7622         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7623         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7624 #endif
7625         unlock_user_struct(target_st, target_addr, 1);
7626     } else
7627 #endif
7628     {
7629 #if defined(TARGET_HAS_STRUCT_STAT64)
7630         struct target_stat64 *target_st;
7631 #else
7632         struct target_stat *target_st;
7633 #endif
7634 
7635         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7636             return -TARGET_EFAULT;
7637         memset(target_st, 0, sizeof(*target_st));
7638         __put_user(host_st->st_dev, &target_st->st_dev);
7639         __put_user(host_st->st_ino, &target_st->st_ino);
7640 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7641         __put_user(host_st->st_ino, &target_st->__st_ino);
7642 #endif
7643         __put_user(host_st->st_mode, &target_st->st_mode);
7644         __put_user(host_st->st_nlink, &target_st->st_nlink);
7645         __put_user(host_st->st_uid, &target_st->st_uid);
7646         __put_user(host_st->st_gid, &target_st->st_gid);
7647         __put_user(host_st->st_rdev, &target_st->st_rdev);
7648         /* XXX: better use of kernel struct */
7649         __put_user(host_st->st_size, &target_st->st_size);
7650         __put_user(host_st->st_blksize, &target_st->st_blksize);
7651         __put_user(host_st->st_blocks, &target_st->st_blocks);
7652         __put_user(host_st->st_atime, &target_st->target_st_atime);
7653         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7654         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7655 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7656         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7657         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7658         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7659 #endif
7660         unlock_user_struct(target_st, target_addr, 1);
7661     }
7662 
7663     return 0;
7664 }
7665 #endif
7666 
7667 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7668 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7669                                             abi_ulong target_addr)
7670 {
7671     struct target_statx *target_stx;
7672 
7673     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7674         return -TARGET_EFAULT;
7675     }
7676     memset(target_stx, 0, sizeof(*target_stx));
7677 
7678     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7679     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7680     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7681     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7682     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7683     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7684     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7685     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7686     __put_user(host_stx->stx_size, &target_stx->stx_size);
7687     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7688     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7689     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7690     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7691     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7692     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7693     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7694     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7695     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7696     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7697     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7698     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7699     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7700     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7701 
7702     unlock_user_struct(target_stx, target_addr, 1);
7703 
7704     return 0;
7705 }
7706 #endif
7707 
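/*
 * do_sys_futex issues the futex syscall directly and is used below for
 * internal wake-ups such as the CLONE_CHILD_CLEARTID notification on thread
 * exit.  do_safe_futex uses the safe_futex wrapper and reports its result
 * through get_errno, so blocking waits interact correctly with guest signals.
 */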
7708 static int do_sys_futex(int *uaddr, int op, int val,
7709                          const struct timespec *timeout, int *uaddr2,
7710                          int val3)
7711 {
7712 #if HOST_LONG_BITS == 64
7713 #if defined(__NR_futex)
7714     /* On 64-bit hosts time_t is already 64 bits, so there is no _time64 variant. */
7715     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7716 
7717 #endif
7718 #else /* HOST_LONG_BITS == 64 */
7719 #if defined(__NR_futex_time64)
7720     if (sizeof(timeout->tv_sec) == 8) {
7721         /* _time64 function on 32bit arch */
7722         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7723     }
7724 #endif
7725 #if defined(__NR_futex)
7726     /* old function on 32bit arch */
7727     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7728 #endif
7729 #endif /* HOST_LONG_BITS == 64 */
7730     g_assert_not_reached();
7731 }
7732 
7733 static int do_safe_futex(int *uaddr, int op, int val,
7734                          const struct timespec *timeout, int *uaddr2,
7735                          int val3)
7736 {
7737 #if HOST_LONG_BITS == 64
7738 #if defined(__NR_futex)
7739     /* On 64-bit hosts time_t is already 64 bits, so there is no _time64 variant. */
7740     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7741 #endif
7742 #else /* HOST_LONG_BITS == 64 */
7743 #if defined(__NR_futex_time64)
7744     if (sizeof(timeout->tv_sec) == 8) {
7745         /* _time64 function on 32bit arch */
7746         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7747                                            val3));
7748     }
7749 #endif
7750 #if defined(__NR_futex)
7751     /* old function on 32bit arch */
7752     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7753 #endif
7754 #endif /* HOST_LONG_BITS == 64 */
7755     return -TARGET_ENOSYS;
7756 }
7757 
7758 /* ??? Using host futex calls even when target atomic operations
7759    are not really atomic probably breaks things.  However, implementing
7760    futexes locally would make futexes shared between multiple processes
7761    tricky.  Then again, local futexes are probably useless anyway because
7762    guest atomic operations won't work either.  */
7763 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7764 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7765                     int op, int val, target_ulong timeout,
7766                     target_ulong uaddr2, int val3)
7767 {
7768     struct timespec ts, *pts = NULL;
7769     void *haddr2 = NULL;
7770     int base_op;
7771 
7772     /* We assume FUTEX_* constants are the same on both host and target. */
7773 #ifdef FUTEX_CMD_MASK
7774     base_op = op & FUTEX_CMD_MASK;
7775 #else
7776     base_op = op;
7777 #endif
7778     switch (base_op) {
7779     case FUTEX_WAIT:
7780     case FUTEX_WAIT_BITSET:
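        /*
         * VAL is compared against the futex word in guest memory, which is
         * stored in guest byte order, so byte-swap it to match.
         */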
7781         val = tswap32(val);
7782         break;
7783     case FUTEX_WAIT_REQUEUE_PI:
7784         val = tswap32(val);
7785         haddr2 = g2h(cpu, uaddr2);
7786         break;
7787     case FUTEX_LOCK_PI:
7788     case FUTEX_LOCK_PI2:
7789         break;
7790     case FUTEX_WAKE:
7791     case FUTEX_WAKE_BITSET:
7792     case FUTEX_TRYLOCK_PI:
7793     case FUTEX_UNLOCK_PI:
7794         timeout = 0;
7795         break;
7796     case FUTEX_FD:
7797         val = target_to_host_signal(val);
7798         timeout = 0;
7799         break;
7800     case FUTEX_CMP_REQUEUE:
7801     case FUTEX_CMP_REQUEUE_PI:
7802         val3 = tswap32(val3);
7803         /* fall through */
7804     case FUTEX_REQUEUE:
7805     case FUTEX_WAKE_OP:
7806         /*
7807          * For these, the 4th argument is not TIMEOUT, but VAL2.
7808          * But the prototype of do_safe_futex takes a pointer, so
7809          * insert casts to satisfy the compiler.  We do not need
7810          * to tswap VAL2 since it's not compared to guest memory.
7811          */
7812         pts = (struct timespec *)(uintptr_t)timeout;
7813         timeout = 0;
7814         haddr2 = g2h(cpu, uaddr2);
7815         break;
7816     default:
7817         return -TARGET_ENOSYS;
7818     }
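    /*
     * For the operations that take a real timeout, convert the guest
     * timespec (the 64-bit layout when 'time64' is set) to the host format.
     */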
7819     if (timeout) {
7820         pts = &ts;
7821         if (time64
7822             ? target_to_host_timespec64(pts, timeout)
7823             : target_to_host_timespec(pts, timeout)) {
7824             return -TARGET_EFAULT;
7825         }
7826     }
7827     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7828 }
7829 #endif
7830 
7831 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7832 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7833                                      abi_long handle, abi_long mount_id,
7834                                      abi_long flags)
7835 {
7836     struct file_handle *target_fh;
7837     struct file_handle *fh;
7838     int mid = 0;
7839     abi_long ret;
7840     char *name;
7841     unsigned int size, total_size;
7842 
7843     if (get_user_s32(size, handle)) {
7844         return -TARGET_EFAULT;
7845     }
7846 
7847     name = lock_user_string(pathname);
7848     if (!name) {
7849         return -TARGET_EFAULT;
7850     }
7851 
7852     total_size = sizeof(struct file_handle) + size;
7853     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7854     if (!target_fh) {
7855         unlock_user(name, pathname, 0);
7856         return -TARGET_EFAULT;
7857     }
7858 
7859     fh = g_malloc0(total_size);
7860     fh->handle_bytes = size;
7861 
7862     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7863     unlock_user(name, pathname, 0);
7864 
7865     /* man name_to_handle_at(2):
7866      * Other than the use of the handle_bytes field, the caller should treat
7867      * the file_handle structure as an opaque data type
7868      */
7869 
7870     memcpy(target_fh, fh, total_size);
7871     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7872     target_fh->handle_type = tswap32(fh->handle_type);
7873     g_free(fh);
7874     unlock_user(target_fh, handle, total_size);
7875 
7876     if (put_user_s32(mid, mount_id)) {
7877         return -TARGET_EFAULT;
7878     }
7879 
7880     return ret;
7881 
7882 }
7883 #endif
7884 
7885 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7886 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7887                                      abi_long flags)
7888 {
7889     struct file_handle *target_fh;
7890     struct file_handle *fh;
7891     unsigned int size, total_size;
7892     abi_long ret;
7893 
7894     if (get_user_s32(size, handle)) {
7895         return -TARGET_EFAULT;
7896     }
7897 
7898     total_size = sizeof(struct file_handle) + size;
7899     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7900     if (!target_fh) {
7901         return -TARGET_EFAULT;
7902     }
7903 
7904     fh = g_memdup(target_fh, total_size);
7905     fh->handle_bytes = size;
7906     fh->handle_type = tswap32(target_fh->handle_type);
7907 
7908     ret = get_errno(open_by_handle_at(mount_fd, fh,
7909                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7910 
7911     g_free(fh);
7912 
7913     unlock_user(target_fh, handle, total_size);
7914 
7915     return ret;
7916 }
7917 #endif
7918 
7919 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7920 
7921 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7922 {
7923     int host_flags;
7924     target_sigset_t *target_mask;
7925     sigset_t host_mask;
7926     abi_long ret;
7927 
7928     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7929         return -TARGET_EINVAL;
7930     }
7931     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7932         return -TARGET_EFAULT;
7933     }
7934 
7935     target_to_host_sigset(&host_mask, target_mask);
7936 
7937     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7938 
7939     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7940     if (ret >= 0) {
7941         fd_trans_register(ret, &target_signalfd_trans);
7942     }
7943 
7944     unlock_user_struct(target_mask, mask, 0);
7945 
7946     return ret;
7947 }
7948 #endif
7949 
7950 /* Map host to target signal numbers for the wait family of syscalls.
7951    Assume all other status bits are the same.  */
7952 int host_to_target_waitstatus(int status)
7953 {
7954     if (WIFSIGNALED(status)) {
7955         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7956     }
7957     if (WIFSTOPPED(status)) {
7958         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7959                | (status & 0xff);
7960     }
7961     return status;
7962 }
7963 
7964 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7965 {
7966     CPUState *cpu = env_cpu(cpu_env);
7967     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7968     int i;
7969 
7970     for (i = 0; i < bprm->argc; i++) {
7971         size_t len = strlen(bprm->argv[i]) + 1;
7972 
7973         if (write(fd, bprm->argv[i], len) != len) {
7974             return -1;
7975         }
7976     }
7977 
7978     return 0;
7979 }
7980 
7981 static int open_self_maps(CPUArchState *cpu_env, int fd)
7982 {
7983     CPUState *cpu = env_cpu(cpu_env);
7984     TaskState *ts = cpu->opaque;
7985     GSList *map_info = read_self_maps();
7986     GSList *s;
7987     int count;
7988 
7989     for (s = map_info; s; s = g_slist_next(s)) {
7990         MapInfo *e = (MapInfo *) s->data;
7991 
7992         if (h2g_valid(e->start)) {
7993             unsigned long min = e->start;
7994             unsigned long max = e->end;
7995             int flags = page_get_flags(h2g(min));
7996             const char *path;
7997 
7998             max = h2g_valid(max - 1) ?
7999                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8000 
8001             if (page_check_range(h2g(min), max - min, flags) == -1) {
8002                 continue;
8003             }
8004 
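            /*
             * The stack grows upward on hppa, so the stack mapping is
             * recognised by its upper bound rather than its lower bound.
             */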
8005 #ifdef TARGET_HPPA
8006             if (h2g(max) == ts->info->stack_limit) {
8007 #else
8008             if (h2g(min) == ts->info->stack_limit) {
8009 #endif
8010                 path = "[stack]";
8011             } else {
8012                 path = e->path;
8013             }
8014 
8015             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8016                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8017                             h2g(min), h2g(max - 1) + 1,
8018                             (flags & PAGE_READ) ? 'r' : '-',
8019                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8020                             (flags & PAGE_EXEC) ? 'x' : '-',
8021                             e->is_priv ? 'p' : 's',
8022                             (uint64_t) e->offset, e->dev, e->inode);
8023             if (path) {
8024                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8025             } else {
8026                 dprintf(fd, "\n");
8027             }
8028         }
8029     }
8030 
8031     free_self_maps(map_info);
8032 
8033 #ifdef TARGET_VSYSCALL_PAGE
8034     /*
8035      * We only support execution from the vsyscall page.
8036      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8037      */
8038     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8039                     " --xp 00000000 00:00 0",
8040                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8041     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8042 #endif
8043 
8044     return 0;
8045 }
8046 
8047 static int open_self_stat(CPUArchState *cpu_env, int fd)
8048 {
8049     CPUState *cpu = env_cpu(cpu_env);
8050     TaskState *ts = cpu->opaque;
8051     g_autoptr(GString) buf = g_string_new(NULL);
8052     int i;
8053 
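    /*
     * Synthesize /proc/self/stat.  Only the fields a guest is likely to
     * inspect (pid, comm, ppid, starttime and start of stack) are filled
     * in; every other field is reported as 0.
     */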
8054     for (i = 0; i < 44; i++) {
8055         if (i == 0) {
8056             /* pid */
8057             g_string_printf(buf, FMT_pid " ", getpid());
8058         } else if (i == 1) {
8059             /* app name */
8060             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8061             bin = bin ? bin + 1 : ts->bprm->argv[0];
8062             g_string_printf(buf, "(%.15s) ", bin);
8063         } else if (i == 3) {
8064             /* ppid */
8065             g_string_printf(buf, FMT_pid " ", getppid());
8066         } else if (i == 21) {
8067             /* starttime */
8068             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8069         } else if (i == 27) {
8070             /* stack bottom */
8071             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8072         } else {
8073             /* for the rest, there is MasterCard */
8074             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8075         }
8076 
8077         if (write(fd, buf->str, buf->len) != buf->len) {
8078             return -1;
8079         }
8080     }
8081 
8082     return 0;
8083 }
8084 
8085 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8086 {
8087     CPUState *cpu = env_cpu(cpu_env);
8088     TaskState *ts = cpu->opaque;
8089     abi_ulong auxv = ts->info->saved_auxv;
8090     abi_ulong len = ts->info->auxv_len;
8091     char *ptr;
8092 
8093     /*
8094      * The auxiliary vector is stored on the target process stack.
8095      * Read the whole auxv vector and copy it to the file.
8096      */
8097     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8098     if (ptr != NULL) {
8099         while (len > 0) {
8100             ssize_t r;
8101             r = write(fd, ptr, len);
8102             if (r <= 0) {
8103                 break;
8104             }
8105             len -= r;
8106             ptr += r;
8107         }
8108         lseek(fd, 0, SEEK_SET);
8109         unlock_user(ptr, auxv, len);
8110     }
8111 
8112     return 0;
8113 }
8114 
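/*
 * Return 1 if FILENAME names /proc/self/<entry> or /proc/<pid>/<entry> for
 * our own pid, i.e. it refers to the emulated process itself; 0 otherwise.
 */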
8115 static int is_proc_myself(const char *filename, const char *entry)
8116 {
8117     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8118         filename += strlen("/proc/");
8119         if (!strncmp(filename, "self/", strlen("self/"))) {
8120             filename += strlen("self/");
8121         } else if (*filename >= '1' && *filename <= '9') {
8122             char myself[80];
8123             snprintf(myself, sizeof(myself), "%d/", getpid());
8124             if (!strncmp(filename, myself, strlen(myself))) {
8125                 filename += strlen(myself);
8126             } else {
8127                 return 0;
8128             }
8129         } else {
8130             return 0;
8131         }
8132         if (!strcmp(filename, entry)) {
8133             return 1;
8134         }
8135     }
8136     return 0;
8137 }
8138 
8139 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8140                       const char *fmt, int code)
8141 {
8142     if (logfile) {
8143         CPUState *cs = env_cpu(env);
8144 
8145         fprintf(logfile, fmt, code);
8146         fprintf(logfile, "Failing executable: %s\n", exec_path);
8147         cpu_dump_state(cs, logfile, 0);
8148         open_self_maps(env, fileno(logfile));
8149     }
8150 }
8151 
8152 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8153 {
8154     /* dump to console */
8155     excp_dump_file(stderr, env, fmt, code);
8156 
8157     /* dump to log file */
8158     if (qemu_log_separate()) {
8159         FILE *logfile = qemu_log_trylock();
8160 
8161         excp_dump_file(logfile, env, fmt, code);
8162         qemu_log_unlock(logfile);
8163     }
8164 }
8165 
8166 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8167     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8168 static int is_proc(const char *filename, const char *entry)
8169 {
8170     return strcmp(filename, entry) == 0;
8171 }
8172 #endif
8173 
8174 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
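/*
 * /proc/net/route exposes addresses in host byte order, so when host and
 * target endianness differ the address fields have to be byte-swapped
 * before being shown to the guest.
 */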
8175 static int open_net_route(CPUArchState *cpu_env, int fd)
8176 {
8177     FILE *fp;
8178     char *line = NULL;
8179     size_t len = 0;
8180     ssize_t read;
8181 
8182     fp = fopen("/proc/net/route", "r");
8183     if (fp == NULL) {
8184         return -1;
8185     }
8186 
8187     /* read header */
8188 
8189     read = getline(&line, &len, fp);
8190     dprintf(fd, "%s", line);
8191 
8192     /* read routes */
8193 
8194     while ((read = getline(&line, &len, fp)) != -1) {
8195         char iface[16];
8196         uint32_t dest, gw, mask;
8197         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8198         int fields;
8199 
8200         fields = sscanf(line,
8201                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8202                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8203                         &mask, &mtu, &window, &irtt);
8204         if (fields != 11) {
8205             continue;
8206         }
8207         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8208                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8209                 metric, tswap32(mask), mtu, window, irtt);
8210     }
8211 
8212     free(line);
8213     fclose(fp);
8214 
8215     return 0;
8216 }
8217 #endif
8218 
8219 #if defined(TARGET_SPARC)
8220 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8221 {
8222     dprintf(fd, "type\t\t: sun4u\n");
8223     return 0;
8224 }
8225 #endif
8226 
8227 #if defined(TARGET_HPPA)
8228 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8229 {
8230     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8231     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8232     dprintf(fd, "capabilities\t: os32\n");
8233     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8234     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8235     return 0;
8236 }
8237 #endif
8238 
8239 #if defined(TARGET_M68K)
8240 static int open_hardware(CPUArchState *cpu_env, int fd)
8241 {
8242     dprintf(fd, "Model:\t\tqemu-m68k\n");
8243     return 0;
8244 }
8245 #endif
8246 
8247 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8248 {
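    /*
     * Some /proc files would describe QEMU itself rather than the guest if
     * opened directly, so their contents are synthesized instead: the
     * matching "fill" callback writes the emulated contents into a memfd
     * (or a temporary file as a fallback) and that descriptor is returned.
     */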
8249     struct fake_open {
8250         const char *filename;
8251         int (*fill)(CPUArchState *cpu_env, int fd);
8252         int (*cmp)(const char *s1, const char *s2);
8253     };
8254     const struct fake_open *fake_open;
8255     static const struct fake_open fakes[] = {
8256         { "maps", open_self_maps, is_proc_myself },
8257         { "stat", open_self_stat, is_proc_myself },
8258         { "auxv", open_self_auxv, is_proc_myself },
8259         { "cmdline", open_self_cmdline, is_proc_myself },
8260 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8261         { "/proc/net/route", open_net_route, is_proc },
8262 #endif
8263 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8264         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8265 #endif
8266 #if defined(TARGET_M68K)
8267         { "/proc/hardware", open_hardware, is_proc },
8268 #endif
8269         { NULL, NULL, NULL }
8270     };
8271 
8272     if (is_proc_myself(pathname, "exe")) {
8273         return safe_openat(dirfd, exec_path, flags, mode);
8274     }
8275 
8276     for (fake_open = fakes; fake_open->filename; fake_open++) {
8277         if (fake_open->cmp(pathname, fake_open->filename)) {
8278             break;
8279         }
8280     }
8281 
8282     if (fake_open->filename) {
8283         const char *tmpdir;
8284         char filename[PATH_MAX];
8285         int fd, r;
8286 
8287         fd = memfd_create("qemu-open", 0);
8288         if (fd < 0) {
8289             if (errno != ENOSYS) {
8290                 return fd;
8291             }
8292             /* fall back to a temporary file to hold the synthesized contents */
8293             tmpdir = getenv("TMPDIR");
8294             if (!tmpdir)
8295                 tmpdir = "/tmp";
8296             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8297             fd = mkstemp(filename);
8298             if (fd < 0) {
8299                 return fd;
8300             }
8301             unlink(filename);
8302         }
8303 
8304         if ((r = fake_open->fill(cpu_env, fd))) {
8305             int e = errno;
8306             close(fd);
8307             errno = e;
8308             return r;
8309         }
8310         lseek(fd, 0, SEEK_SET);
8311 
8312         return fd;
8313     }
8314 
8315     return safe_openat(dirfd, path(pathname), flags, mode);
8316 }
8317 
8318 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8319                        abi_long pathname, abi_long guest_argp,
8320                        abi_long guest_envp, int flags)
8321 {
8322     int ret;
8323     char **argp, **envp;
8324     int argc, envc;
8325     abi_ulong gp;
8326     abi_ulong addr;
8327     char **q;
8328     void *p;
8329 
8330     argc = 0;
8331 
8332     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8333         if (get_user_ual(addr, gp)) {
8334             return -TARGET_EFAULT;
8335         }
8336         if (!addr) {
8337             break;
8338         }
8339         argc++;
8340     }
8341     envc = 0;
8342     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8343         if (get_user_ual(addr, gp)) {
8344             return -TARGET_EFAULT;
8345         }
8346         if (!addr) {
8347             break;
8348         }
8349         envc++;
8350     }
8351 
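    /*
     * The loops above only count the guest argv/envp entries; the host
     * arrays are allocated here and the strings are locked into host
     * memory in a second pass below.
     */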
8352     argp = g_new0(char *, argc + 1);
8353     envp = g_new0(char *, envc + 1);
8354 
8355     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8356         if (get_user_ual(addr, gp)) {
8357             goto execve_efault;
8358         }
8359         if (!addr) {
8360             break;
8361         }
8362         *q = lock_user_string(addr);
8363         if (!*q) {
8364             goto execve_efault;
8365         }
8366     }
8367     *q = NULL;
8368 
8369     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8370         if (get_user_ual(addr, gp)) {
8371             goto execve_efault;
8372         }
8373         if (!addr) {
8374             break;
8375         }
8376         *q = lock_user_string(addr);
8377         if (!*q) {
8378             goto execve_efault;
8379         }
8380     }
8381     *q = NULL;
8382 
8383     /*
8384      * Although execve() is not an interruptible syscall it is
8385      * a special case where we must use the safe_syscall wrapper:
8386      * if we allow a signal to happen before we make the host
8387      * syscall then we will 'lose' it, because at the point of
8388      * execve the process leaves QEMU's control. So we use the
8389      * safe syscall wrapper to ensure that we either take the
8390      * signal as a guest signal, or else it does not happen
8391      * before the execve completes and makes it the other
8392      * program's problem.
8393      */
8394     p = lock_user_string(pathname);
8395     if (!p) {
8396         goto execve_efault;
8397     }
8398 
8399     if (is_proc_myself(p, "exe")) {
8400         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8401     } else {
8402         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8403     }
8404 
8405     unlock_user(p, pathname, 0);
8406 
8407     goto execve_end;
8408 
8409 execve_efault:
8410     ret = -TARGET_EFAULT;
8411 
8412 execve_end:
8413     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8414         if (get_user_ual(addr, gp) || !addr) {
8415             break;
8416         }
8417         unlock_user(*q, addr, 0);
8418     }
8419     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8420         if (get_user_ual(addr, gp) || !addr) {
8421             break;
8422         }
8423         unlock_user(*q, addr, 0);
8424     }
8425 
8426     g_free(argp);
8427     g_free(envp);
8428     return ret;
8429 }
8430 
8431 #define TIMER_MAGIC 0x0caf0000
8432 #define TIMER_MAGIC_MASK 0xffff0000
8433 
8434 /* Convert QEMU provided timer ID back to internal 16bit index format */
8435 static target_timer_t get_timer_id(abi_long arg)
8436 {
8437     target_timer_t timerid = arg;
8438 
8439     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8440         return -TARGET_EINVAL;
8441     }
8442 
8443     timerid &= 0xffff;
8444 
8445     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8446         return -TARGET_EINVAL;
8447     }
8448 
8449     return timerid;
8450 }
8451 
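/*
 * CPU affinity masks are converted bit by bit between the guest layout
 * (an array of abi_ulong) and the host layout (an array of unsigned long),
 * which keeps the translation correct even when word size or endianness
 * differ between host and target.
 */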
8452 static int target_to_host_cpu_mask(unsigned long *host_mask,
8453                                    size_t host_size,
8454                                    abi_ulong target_addr,
8455                                    size_t target_size)
8456 {
8457     unsigned target_bits = sizeof(abi_ulong) * 8;
8458     unsigned host_bits = sizeof(*host_mask) * 8;
8459     abi_ulong *target_mask;
8460     unsigned i, j;
8461 
8462     assert(host_size >= target_size);
8463 
8464     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8465     if (!target_mask) {
8466         return -TARGET_EFAULT;
8467     }
8468     memset(host_mask, 0, host_size);
8469 
8470     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8471         unsigned bit = i * target_bits;
8472         abi_ulong val;
8473 
8474         __get_user(val, &target_mask[i]);
8475         for (j = 0; j < target_bits; j++, bit++) {
8476             if (val & (1UL << j)) {
8477                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8478             }
8479         }
8480     }
8481 
8482     unlock_user(target_mask, target_addr, 0);
8483     return 0;
8484 }
8485 
8486 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8487                                    size_t host_size,
8488                                    abi_ulong target_addr,
8489                                    size_t target_size)
8490 {
8491     unsigned target_bits = sizeof(abi_ulong) * 8;
8492     unsigned host_bits = sizeof(*host_mask) * 8;
8493     abi_ulong *target_mask;
8494     unsigned i, j;
8495 
8496     assert(host_size >= target_size);
8497 
8498     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8499     if (!target_mask) {
8500         return -TARGET_EFAULT;
8501     }
8502 
8503     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8504         unsigned bit = i * target_bits;
8505         abi_ulong val = 0;
8506 
8507         for (j = 0; j < target_bits; j++, bit++) {
8508             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8509                 val |= 1UL << j;
8510             }
8511         }
8512         __put_user(val, &target_mask[i]);
8513     }
8514 
8515     unlock_user(target_mask, target_addr, target_size);
8516     return 0;
8517 }
8518 
8519 #ifdef TARGET_NR_getdents
8520 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8521 {
8522     g_autofree void *hdirp = NULL;
8523     void *tdirp;
8524     int hlen, hoff, toff;
8525     int hreclen, treclen;
8526     off64_t prev_diroff = 0;
8527 
8528     hdirp = g_try_malloc(count);
8529     if (!hdirp) {
8530         return -TARGET_ENOMEM;
8531     }
8532 
8533 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8534     hlen = sys_getdents(dirfd, hdirp, count);
8535 #else
8536     hlen = sys_getdents64(dirfd, hdirp, count);
8537 #endif
8538 
8539     hlen = get_errno(hlen);
8540     if (is_error(hlen)) {
8541         return hlen;
8542     }
8543 
8544     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8545     if (!tdirp) {
8546         return -TARGET_EFAULT;
8547     }
8548 
8549     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8550 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8551         struct linux_dirent *hde = hdirp + hoff;
8552 #else
8553         struct linux_dirent64 *hde = hdirp + hoff;
8554 #endif
8555         struct target_dirent *tde = tdirp + toff;
8556         int namelen;
8557         uint8_t type;
8558 
8559         namelen = strlen(hde->d_name);
8560         hreclen = hde->d_reclen;
8561         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8562         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8563 
8564         if (toff + treclen > count) {
8565             /*
8566              * If the host struct is smaller than the target struct, or
8567              * requires less alignment and thus packs into less space,
8568              * then the host can return more entries than we can pass
8569              * on to the guest.
8570              */
8571             if (toff == 0) {
8572                 toff = -TARGET_EINVAL; /* result buffer is too small */
8573                 break;
8574             }
8575             /*
8576              * Return what we have, resetting the file pointer to the
8577              * location of the first record not returned.
8578              */
8579             lseek64(dirfd, prev_diroff, SEEK_SET);
8580             break;
8581         }
8582 
8583         prev_diroff = hde->d_off;
8584         tde->d_ino = tswapal(hde->d_ino);
8585         tde->d_off = tswapal(hde->d_off);
8586         tde->d_reclen = tswap16(treclen);
8587         memcpy(tde->d_name, hde->d_name, namelen + 1);
8588 
8589         /*
8590          * The getdents type is in what was formerly a padding byte at the
8591          * end of the structure.
8592          */
8593 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8594         type = *((uint8_t *)hde + hreclen - 1);
8595 #else
8596         type = hde->d_type;
8597 #endif
8598         *((uint8_t *)tde + treclen - 1) = type;
8599     }
8600 
8601     unlock_user(tdirp, arg2, toff);
8602     return toff;
8603 }
8604 #endif /* TARGET_NR_getdents */
8605 
8606 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8607 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8608 {
8609     g_autofree void *hdirp = NULL;
8610     void *tdirp;
8611     int hlen, hoff, toff;
8612     int hreclen, treclen;
8613     off64_t prev_diroff = 0;
8614 
8615     hdirp = g_try_malloc(count);
8616     if (!hdirp) {
8617         return -TARGET_ENOMEM;
8618     }
8619 
8620     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8621     if (is_error(hlen)) {
8622         return hlen;
8623     }
8624 
8625     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8626     if (!tdirp) {
8627         return -TARGET_EFAULT;
8628     }
8629 
8630     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8631         struct linux_dirent64 *hde = hdirp + hoff;
8632         struct target_dirent64 *tde = tdirp + toff;
8633         int namelen;
8634 
8635         namelen = strlen(hde->d_name) + 1;
8636         hreclen = hde->d_reclen;
8637         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8638         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8639 
8640         if (toff + treclen > count) {
8641             /*
8642              * If the host struct is smaller than the target struct, or
8643              * requires less alignment and thus packs into less space,
8644              * then the host can return more entries than we can pass
8645              * on to the guest.
8646              */
8647             if (toff == 0) {
8648                 toff = -TARGET_EINVAL; /* result buffer is too small */
8649                 break;
8650             }
8651             /*
8652              * Return what we have, resetting the file pointer to the
8653              * location of the first record not returned.
8654              */
8655             lseek64(dirfd, prev_diroff, SEEK_SET);
8656             break;
8657         }
8658 
8659         prev_diroff = hde->d_off;
8660         tde->d_ino = tswap64(hde->d_ino);
8661         tde->d_off = tswap64(hde->d_off);
8662         tde->d_reclen = tswap16(treclen);
8663         tde->d_type = hde->d_type;
8664         memcpy(tde->d_name, hde->d_name, namelen);
8665     }
8666 
8667     unlock_user(tdirp, arg2, toff);
8668     return toff;
8669 }
8670 #endif /* TARGET_NR_getdents64 */
8671 
8672 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8673 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8674 #endif
8675 
8676 /* This is an internal helper for do_syscall so that it is easier
8677  * to have a single return point, so that actions, such as logging
8678  * of syscall results, can be performed.
8679  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8680  */
8681 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8682                             abi_long arg2, abi_long arg3, abi_long arg4,
8683                             abi_long arg5, abi_long arg6, abi_long arg7,
8684                             abi_long arg8)
8685 {
8686     CPUState *cpu = env_cpu(cpu_env);
8687     abi_long ret;
8688 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8689     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8690     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8691     || defined(TARGET_NR_statx)
8692     struct stat st;
8693 #endif
8694 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8695     || defined(TARGET_NR_fstatfs)
8696     struct statfs stfs;
8697 #endif
8698     void *p;
8699 
8700     switch(num) {
8701     case TARGET_NR_exit:
8702         /* In old applications this may be used to implement _exit(2).
8703            However in threaded applications it is used for thread termination,
8704            and _exit_group is used for application termination.
8705            Do thread termination if we have more than one thread.  */
8706 
8707         if (block_signals()) {
8708             return -QEMU_ERESTARTSYS;
8709         }
8710 
8711         pthread_mutex_lock(&clone_lock);
8712 
8713         if (CPU_NEXT(first_cpu)) {
8714             TaskState *ts = cpu->opaque;
8715 
8716             if (ts->child_tidptr) {
8717                 put_user_u32(0, ts->child_tidptr);
8718                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8719                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8720             }
8721 
8722             object_unparent(OBJECT(cpu));
8723             object_unref(OBJECT(cpu));
8724             /*
8725              * At this point the CPU should be unrealized and removed
8726              * from cpu lists. We can clean-up the rest of the thread
8727              * data without the lock held.
8728              */
8729 
8730             pthread_mutex_unlock(&clone_lock);
8731 
8732             thread_cpu = NULL;
8733             g_free(ts);
8734             rcu_unregister_thread();
8735             pthread_exit(NULL);
8736         }
8737 
8738         pthread_mutex_unlock(&clone_lock);
8739         preexit_cleanup(cpu_env, arg1);
8740         _exit(arg1);
8741         return 0; /* avoid warning */
8742     case TARGET_NR_read:
8743         if (arg2 == 0 && arg3 == 0) {
8744             return get_errno(safe_read(arg1, 0, 0));
8745         } else {
8746             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8747                 return -TARGET_EFAULT;
8748             ret = get_errno(safe_read(arg1, p, arg3));
8749             if (ret >= 0 &&
8750                 fd_trans_host_to_target_data(arg1)) {
8751                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8752             }
8753             unlock_user(p, arg2, ret);
8754         }
8755         return ret;
8756     case TARGET_NR_write:
8757         if (arg2 == 0 && arg3 == 0) {
8758             return get_errno(safe_write(arg1, 0, 0));
8759         }
8760         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8761             return -TARGET_EFAULT;
8762         if (fd_trans_target_to_host_data(arg1)) {
8763             void *copy = g_malloc(arg3);
8764             memcpy(copy, p, arg3);
8765             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8766             if (ret >= 0) {
8767                 ret = get_errno(safe_write(arg1, copy, ret));
8768             }
8769             g_free(copy);
8770         } else {
8771             ret = get_errno(safe_write(arg1, p, arg3));
8772         }
8773         unlock_user(p, arg2, 0);
8774         return ret;
8775 
8776 #ifdef TARGET_NR_open
8777     case TARGET_NR_open:
8778         if (!(p = lock_user_string(arg1)))
8779             return -TARGET_EFAULT;
8780         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8781                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8782                                   arg3));
8783         fd_trans_unregister(ret);
8784         unlock_user(p, arg1, 0);
8785         return ret;
8786 #endif
8787     case TARGET_NR_openat:
8788         if (!(p = lock_user_string(arg2)))
8789             return -TARGET_EFAULT;
8790         ret = get_errno(do_openat(cpu_env, arg1, p,
8791                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8792                                   arg4));
8793         fd_trans_unregister(ret);
8794         unlock_user(p, arg2, 0);
8795         return ret;
8796 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8797     case TARGET_NR_name_to_handle_at:
8798         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8799         return ret;
8800 #endif
8801 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8802     case TARGET_NR_open_by_handle_at:
8803         ret = do_open_by_handle_at(arg1, arg2, arg3);
8804         fd_trans_unregister(ret);
8805         return ret;
8806 #endif
8807 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8808     case TARGET_NR_pidfd_open:
8809         return get_errno(pidfd_open(arg1, arg2));
8810 #endif
8811 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8812     case TARGET_NR_pidfd_send_signal:
8813         {
8814             siginfo_t uinfo, *puinfo;
8815 
8816             if (arg3) {
8817                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8818                 if (!p) {
8819                     return -TARGET_EFAULT;
8820                  }
8821                  target_to_host_siginfo(&uinfo, p);
8822                  unlock_user(p, arg3, 0);
8823                  puinfo = &uinfo;
8824             } else {
8825                  puinfo = NULL;
8826             }
8827             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8828                                               puinfo, arg4));
8829         }
8830         return ret;
8831 #endif
8832 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8833     case TARGET_NR_pidfd_getfd:
8834         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8835 #endif
8836     case TARGET_NR_close:
8837         fd_trans_unregister(arg1);
8838         return get_errno(close(arg1));
8839 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8840     case TARGET_NR_close_range:
8841         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8842         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8843             abi_long fd, maxfd;
8844             maxfd = MIN(arg2, target_fd_max);
8845             for (fd = arg1; fd < maxfd; fd++) {
8846                 fd_trans_unregister(fd);
8847             }
8848         }
8849         return ret;
8850 #endif
8851 
8852     case TARGET_NR_brk:
8853         return do_brk(arg1);
8854 #ifdef TARGET_NR_fork
8855     case TARGET_NR_fork:
8856         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8857 #endif
8858 #ifdef TARGET_NR_waitpid
8859     case TARGET_NR_waitpid:
8860         {
8861             int status;
8862             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8863             if (!is_error(ret) && arg2 && ret
8864                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8865                 return -TARGET_EFAULT;
8866         }
8867         return ret;
8868 #endif
8869 #ifdef TARGET_NR_waitid
8870     case TARGET_NR_waitid:
8871         {
8872             siginfo_t info;
8873             info.si_pid = 0;
8874             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8875             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8876                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8877                     return -TARGET_EFAULT;
8878                 host_to_target_siginfo(p, &info);
8879                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8880             }
8881         }
8882         return ret;
8883 #endif
8884 #ifdef TARGET_NR_creat /* not on alpha */
8885     case TARGET_NR_creat:
8886         if (!(p = lock_user_string(arg1)))
8887             return -TARGET_EFAULT;
8888         ret = get_errno(creat(p, arg2));
8889         fd_trans_unregister(ret);
8890         unlock_user(p, arg1, 0);
8891         return ret;
8892 #endif
8893 #ifdef TARGET_NR_link
8894     case TARGET_NR_link:
8895         {
8896             void * p2;
8897             p = lock_user_string(arg1);
8898             p2 = lock_user_string(arg2);
8899             if (!p || !p2)
8900                 ret = -TARGET_EFAULT;
8901             else
8902                 ret = get_errno(link(p, p2));
8903             unlock_user(p2, arg2, 0);
8904             unlock_user(p, arg1, 0);
8905         }
8906         return ret;
8907 #endif
8908 #if defined(TARGET_NR_linkat)
8909     case TARGET_NR_linkat:
8910         {
8911             void * p2 = NULL;
8912             if (!arg2 || !arg4)
8913                 return -TARGET_EFAULT;
8914             p  = lock_user_string(arg2);
8915             p2 = lock_user_string(arg4);
8916             if (!p || !p2)
8917                 ret = -TARGET_EFAULT;
8918             else
8919                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8920             unlock_user(p, arg2, 0);
8921             unlock_user(p2, arg4, 0);
8922         }
8923         return ret;
8924 #endif
8925 #ifdef TARGET_NR_unlink
8926     case TARGET_NR_unlink:
8927         if (!(p = lock_user_string(arg1)))
8928             return -TARGET_EFAULT;
8929         ret = get_errno(unlink(p));
8930         unlock_user(p, arg1, 0);
8931         return ret;
8932 #endif
8933 #if defined(TARGET_NR_unlinkat)
8934     case TARGET_NR_unlinkat:
8935         if (!(p = lock_user_string(arg2)))
8936             return -TARGET_EFAULT;
8937         ret = get_errno(unlinkat(arg1, p, arg3));
8938         unlock_user(p, arg2, 0);
8939         return ret;
8940 #endif
8941     case TARGET_NR_execveat:
8942         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8943     case TARGET_NR_execve:
8944         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8945     case TARGET_NR_chdir:
8946         if (!(p = lock_user_string(arg1)))
8947             return -TARGET_EFAULT;
8948         ret = get_errno(chdir(p));
8949         unlock_user(p, arg1, 0);
8950         return ret;
8951 #ifdef TARGET_NR_time
8952     case TARGET_NR_time:
8953         {
8954             time_t host_time;
8955             ret = get_errno(time(&host_time));
8956             if (!is_error(ret)
8957                 && arg1
8958                 && put_user_sal(host_time, arg1))
8959                 return -TARGET_EFAULT;
8960         }
8961         return ret;
8962 #endif
8963 #ifdef TARGET_NR_mknod
8964     case TARGET_NR_mknod:
8965         if (!(p = lock_user_string(arg1)))
8966             return -TARGET_EFAULT;
8967         ret = get_errno(mknod(p, arg2, arg3));
8968         unlock_user(p, arg1, 0);
8969         return ret;
8970 #endif
8971 #if defined(TARGET_NR_mknodat)
8972     case TARGET_NR_mknodat:
8973         if (!(p = lock_user_string(arg2)))
8974             return -TARGET_EFAULT;
8975         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8976         unlock_user(p, arg2, 0);
8977         return ret;
8978 #endif
8979 #ifdef TARGET_NR_chmod
8980     case TARGET_NR_chmod:
8981         if (!(p = lock_user_string(arg1)))
8982             return -TARGET_EFAULT;
8983         ret = get_errno(chmod(p, arg2));
8984         unlock_user(p, arg1, 0);
8985         return ret;
8986 #endif
8987 #ifdef TARGET_NR_lseek
8988     case TARGET_NR_lseek:
8989         return get_errno(lseek(arg1, arg2, arg3));
8990 #endif
8991 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8992     /* Alpha specific */
8993     case TARGET_NR_getxpid:
8994         cpu_env->ir[IR_A4] = getppid();
8995         return get_errno(getpid());
8996 #endif
8997 #ifdef TARGET_NR_getpid
8998     case TARGET_NR_getpid:
8999         return get_errno(getpid());
9000 #endif
9001     case TARGET_NR_mount:
9002         {
9003             /* need to look at the data field */
9004             void *p2, *p3;
9005 
9006             if (arg1) {
9007                 p = lock_user_string(arg1);
9008                 if (!p) {
9009                     return -TARGET_EFAULT;
9010                 }
9011             } else {
9012                 p = NULL;
9013             }
9014 
9015             p2 = lock_user_string(arg2);
9016             if (!p2) {
9017                 if (arg1) {
9018                     unlock_user(p, arg1, 0);
9019                 }
9020                 return -TARGET_EFAULT;
9021             }
9022 
9023             if (arg3) {
9024                 p3 = lock_user_string(arg3);
9025                 if (!p3) {
9026                     if (arg1) {
9027                         unlock_user(p, arg1, 0);
9028                     }
9029                     unlock_user(p2, arg2, 0);
9030                     return -TARGET_EFAULT;
9031                 }
9032             } else {
9033                 p3 = NULL;
9034             }
9035 
9036             /* FIXME - arg5 should be locked, but it isn't clear how to
9037              * do that since it's not guaranteed to be a NULL-terminated
9038              * string.
9039              */
9040             if (!arg5) {
9041                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9042             } else {
9043                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9044             }
9045             ret = get_errno(ret);
9046 
9047             if (arg1) {
9048                 unlock_user(p, arg1, 0);
9049             }
9050             unlock_user(p2, arg2, 0);
9051             if (arg3) {
9052                 unlock_user(p3, arg3, 0);
9053             }
9054         }
9055         return ret;
9056 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9057 #if defined(TARGET_NR_umount)
9058     case TARGET_NR_umount:
9059 #endif
9060 #if defined(TARGET_NR_oldumount)
9061     case TARGET_NR_oldumount:
9062 #endif
9063         if (!(p = lock_user_string(arg1)))
9064             return -TARGET_EFAULT;
9065         ret = get_errno(umount(p));
9066         unlock_user(p, arg1, 0);
9067         return ret;
9068 #endif
9069 #ifdef TARGET_NR_stime /* not on alpha */
9070     case TARGET_NR_stime:
9071         {
9072             struct timespec ts;
9073             ts.tv_nsec = 0;
9074             if (get_user_sal(ts.tv_sec, arg1)) {
9075                 return -TARGET_EFAULT;
9076             }
9077             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9078         }
9079 #endif
9080 #ifdef TARGET_NR_alarm /* not on alpha */
9081     case TARGET_NR_alarm:
9082         return alarm(arg1);
9083 #endif
9084 #ifdef TARGET_NR_pause /* not on alpha */
9085     case TARGET_NR_pause:
9086         if (!block_signals()) {
9087             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9088         }
9089         return -TARGET_EINTR;
9090 #endif
9091 #ifdef TARGET_NR_utime
9092     case TARGET_NR_utime:
9093         {
9094             struct utimbuf tbuf, *host_tbuf;
9095             struct target_utimbuf *target_tbuf;
9096             if (arg2) {
9097                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9098                     return -TARGET_EFAULT;
9099                 tbuf.actime = tswapal(target_tbuf->actime);
9100                 tbuf.modtime = tswapal(target_tbuf->modtime);
9101                 unlock_user_struct(target_tbuf, arg2, 0);
9102                 host_tbuf = &tbuf;
9103             } else {
9104                 host_tbuf = NULL;
9105             }
9106             if (!(p = lock_user_string(arg1)))
9107                 return -TARGET_EFAULT;
9108             ret = get_errno(utime(p, host_tbuf));
9109             unlock_user(p, arg1, 0);
9110         }
9111         return ret;
9112 #endif
9113 #ifdef TARGET_NR_utimes
9114     case TARGET_NR_utimes:
9115         {
9116             struct timeval *tvp, tv[2];
9117             if (arg2) {
9118                 if (copy_from_user_timeval(&tv[0], arg2)
9119                     || copy_from_user_timeval(&tv[1],
9120                                               arg2 + sizeof(struct target_timeval)))
9121                     return -TARGET_EFAULT;
9122                 tvp = tv;
9123             } else {
9124                 tvp = NULL;
9125             }
9126             if (!(p = lock_user_string(arg1)))
9127                 return -TARGET_EFAULT;
9128             ret = get_errno(utimes(p, tvp));
9129             unlock_user(p, arg1, 0);
9130         }
9131         return ret;
9132 #endif
9133 #if defined(TARGET_NR_futimesat)
9134     case TARGET_NR_futimesat:
9135         {
9136             struct timeval *tvp, tv[2];
9137             if (arg3) {
9138                 if (copy_from_user_timeval(&tv[0], arg3)
9139                     || copy_from_user_timeval(&tv[1],
9140                                               arg3 + sizeof(struct target_timeval)))
9141                     return -TARGET_EFAULT;
9142                 tvp = tv;
9143             } else {
9144                 tvp = NULL;
9145             }
9146             if (!(p = lock_user_string(arg2))) {
9147                 return -TARGET_EFAULT;
9148             }
9149             ret = get_errno(futimesat(arg1, path(p), tvp));
9150             unlock_user(p, arg2, 0);
9151         }
9152         return ret;
9153 #endif
9154 #ifdef TARGET_NR_access
9155     case TARGET_NR_access:
9156         if (!(p = lock_user_string(arg1))) {
9157             return -TARGET_EFAULT;
9158         }
9159         ret = get_errno(access(path(p), arg2));
9160         unlock_user(p, arg1, 0);
9161         return ret;
9162 #endif
9163 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9164     case TARGET_NR_faccessat:
9165         if (!(p = lock_user_string(arg2))) {
9166             return -TARGET_EFAULT;
9167         }
9168         ret = get_errno(faccessat(arg1, p, arg3, 0));
9169         unlock_user(p, arg2, 0);
9170         return ret;
9171 #endif
9172 #if defined(TARGET_NR_faccessat2)
9173     case TARGET_NR_faccessat2:
9174         if (!(p = lock_user_string(arg2))) {
9175             return -TARGET_EFAULT;
9176         }
9177         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9178         unlock_user(p, arg2, 0);
9179         return ret;
9180 #endif
9181 #ifdef TARGET_NR_nice /* not on alpha */
9182     case TARGET_NR_nice:
9183         return get_errno(nice(arg1));
9184 #endif
9185     case TARGET_NR_sync:
9186         sync();
9187         return 0;
9188 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9189     case TARGET_NR_syncfs:
9190         return get_errno(syncfs(arg1));
9191 #endif
9192     case TARGET_NR_kill:
9193         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9194 #ifdef TARGET_NR_rename
9195     case TARGET_NR_rename:
9196         {
9197             void *p2;
9198             p = lock_user_string(arg1);
9199             p2 = lock_user_string(arg2);
9200             if (!p || !p2)
9201                 ret = -TARGET_EFAULT;
9202             else
9203                 ret = get_errno(rename(p, p2));
9204             unlock_user(p2, arg2, 0);
9205             unlock_user(p, arg1, 0);
9206         }
9207         return ret;
9208 #endif
9209 #if defined(TARGET_NR_renameat)
9210     case TARGET_NR_renameat:
9211         {
9212             void *p2;
9213             p  = lock_user_string(arg2);
9214             p2 = lock_user_string(arg4);
9215             if (!p || !p2)
9216                 ret = -TARGET_EFAULT;
9217             else
9218                 ret = get_errno(renameat(arg1, p, arg3, p2));
9219             unlock_user(p2, arg4, 0);
9220             unlock_user(p, arg2, 0);
9221         }
9222         return ret;
9223 #endif
9224 #if defined(TARGET_NR_renameat2)
9225     case TARGET_NR_renameat2:
9226         {
9227             void *p2;
9228             p  = lock_user_string(arg2);
9229             p2 = lock_user_string(arg4);
9230             if (!p || !p2) {
9231                 ret = -TARGET_EFAULT;
9232             } else {
9233                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9234             }
9235             unlock_user(p2, arg4, 0);
9236             unlock_user(p, arg2, 0);
9237         }
9238         return ret;
9239 #endif
9240 #ifdef TARGET_NR_mkdir
9241     case TARGET_NR_mkdir:
9242         if (!(p = lock_user_string(arg1)))
9243             return -TARGET_EFAULT;
9244         ret = get_errno(mkdir(p, arg2));
9245         unlock_user(p, arg1, 0);
9246         return ret;
9247 #endif
9248 #if defined(TARGET_NR_mkdirat)
9249     case TARGET_NR_mkdirat:
9250         if (!(p = lock_user_string(arg2)))
9251             return -TARGET_EFAULT;
9252         ret = get_errno(mkdirat(arg1, p, arg3));
9253         unlock_user(p, arg2, 0);
9254         return ret;
9255 #endif
9256 #ifdef TARGET_NR_rmdir
9257     case TARGET_NR_rmdir:
9258         if (!(p = lock_user_string(arg1)))
9259             return -TARGET_EFAULT;
9260         ret = get_errno(rmdir(p));
9261         unlock_user(p, arg1, 0);
9262         return ret;
9263 #endif
9264     case TARGET_NR_dup:
9265         ret = get_errno(dup(arg1));
9266         if (ret >= 0) {
9267             fd_trans_dup(arg1, ret);
9268         }
9269         return ret;
9270 #ifdef TARGET_NR_pipe
9271     case TARGET_NR_pipe:
9272         return do_pipe(cpu_env, arg1, 0, 0);
9273 #endif
9274 #ifdef TARGET_NR_pipe2
9275     case TARGET_NR_pipe2:
9276         return do_pipe(cpu_env, arg1,
9277                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9278 #endif
9279     case TARGET_NR_times:
9280         {
9281             struct target_tms *tmsp;
9282             struct tms tms;
9283             ret = get_errno(times(&tms));
9284             if (arg1) {
9285                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9286                 if (!tmsp)
9287                     return -TARGET_EFAULT;
9288                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9289                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9290                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9291                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9292             }
9293             if (!is_error(ret))
9294                 ret = host_to_target_clock_t(ret);
9295         }
9296         return ret;
9297     case TARGET_NR_acct:
9298         if (arg1 == 0) {
9299             ret = get_errno(acct(NULL));
9300         } else {
9301             if (!(p = lock_user_string(arg1))) {
9302                 return -TARGET_EFAULT;
9303             }
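            /*
             * path() applies the -L/QEMU_LD_PREFIX sysroot remapping, if
             * any, to the guest-supplied pathname before it reaches the
             * host syscall.
             */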
9304             ret = get_errno(acct(path(p)));
9305             unlock_user(p, arg1, 0);
9306         }
9307         return ret;
9308 #ifdef TARGET_NR_umount2
9309     case TARGET_NR_umount2:
9310         if (!(p = lock_user_string(arg1)))
9311             return -TARGET_EFAULT;
9312         ret = get_errno(umount2(p, arg2));
9313         unlock_user(p, arg1, 0);
9314         return ret;
9315 #endif
9316     case TARGET_NR_ioctl:
9317         return do_ioctl(arg1, arg2, arg3);
9318 #ifdef TARGET_NR_fcntl
9319     case TARGET_NR_fcntl:
9320         return do_fcntl(arg1, arg2, arg3);
9321 #endif
9322     case TARGET_NR_setpgid:
9323         return get_errno(setpgid(arg1, arg2));
9324     case TARGET_NR_umask:
9325         return get_errno(umask(arg1));
9326     case TARGET_NR_chroot:
9327         if (!(p = lock_user_string(arg1)))
9328             return -TARGET_EFAULT;
9329         ret = get_errno(chroot(p));
9330         unlock_user(p, arg1, 0);
9331         return ret;
9332 #ifdef TARGET_NR_dup2
9333     case TARGET_NR_dup2:
9334         ret = get_errno(dup2(arg1, arg2));
9335         if (ret >= 0) {
9336             fd_trans_dup(arg1, arg2);
9337         }
9338         return ret;
9339 #endif
9340 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9341     case TARGET_NR_dup3:
9342     {
9343         int host_flags;
9344 
9345         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9346             return -TARGET_EINVAL;
9347         }
9348         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9349         ret = get_errno(dup3(arg1, arg2, host_flags));
9350         if (ret >= 0) {
9351             fd_trans_dup(arg1, arg2);
9352         }
9353         return ret;
9354     }
9355 #endif
9356 #ifdef TARGET_NR_getppid /* not on alpha */
9357     case TARGET_NR_getppid:
9358         return get_errno(getppid());
9359 #endif
9360 #ifdef TARGET_NR_getpgrp
9361     case TARGET_NR_getpgrp:
9362         return get_errno(getpgrp());
9363 #endif
9364     case TARGET_NR_setsid:
9365         return get_errno(setsid());
9366 #ifdef TARGET_NR_sigaction
9367     case TARGET_NR_sigaction:
9368         {
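            /*
             * MIPS is special-cased here: its old (non-rt) struct sigaction
             * already carries a full four-word sa_mask, so the conversion
             * goes through struct target_sigaction instead of the generic
             * target_old_sigaction layout used below.
             */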
9369 #if defined(TARGET_MIPS)
9370             struct target_sigaction act, oact, *pact, *old_act;
9371 
9372             if (arg2) {
9373                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9374                     return -TARGET_EFAULT;
9375                 act._sa_handler = old_act->_sa_handler;
9376                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9377                 act.sa_flags = old_act->sa_flags;
9378                 unlock_user_struct(old_act, arg2, 0);
9379                 pact = &act;
9380             } else {
9381                 pact = NULL;
9382             }
9383 
9384             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9385 
9386             if (!is_error(ret) && arg3) {
9387                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9388                     return -TARGET_EFAULT;
9389                 old_act->_sa_handler = oact._sa_handler;
9390                 old_act->sa_flags = oact.sa_flags;
9391                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9392                 old_act->sa_mask.sig[1] = 0;
9393                 old_act->sa_mask.sig[2] = 0;
9394                 old_act->sa_mask.sig[3] = 0;
9395                 unlock_user_struct(old_act, arg3, 1);
9396             }
9397 #else
9398             struct target_old_sigaction *old_act;
9399             struct target_sigaction act, oact, *pact;
9400             if (arg2) {
9401                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9402                     return -TARGET_EFAULT;
9403                 act._sa_handler = old_act->_sa_handler;
9404                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9405                 act.sa_flags = old_act->sa_flags;
9406 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9407                 act.sa_restorer = old_act->sa_restorer;
9408 #endif
9409                 unlock_user_struct(old_act, arg2, 0);
9410                 pact = &act;
9411             } else {
9412                 pact = NULL;
9413             }
9414             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9415             if (!is_error(ret) && arg3) {
9416                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9417                     return -TARGET_EFAULT;
9418                 old_act->_sa_handler = oact._sa_handler;
9419                 old_act->sa_mask = oact.sa_mask.sig[0];
9420                 old_act->sa_flags = oact.sa_flags;
9421 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9422                 old_act->sa_restorer = oact.sa_restorer;
9423 #endif
9424                 unlock_user_struct(old_act, arg3, 1);
9425             }
9426 #endif
9427         }
9428         return ret;
9429 #endif
9430     case TARGET_NR_rt_sigaction:
9431         {
9432             /*
9433              * For Alpha and SPARC this is a 5 argument syscall, with
9434              * a 'restorer' parameter which must be copied into the
9435              * sa_restorer field of the sigaction struct.
9436              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9437              * and arg5 is the sigsetsize.
9438              */
9439 #if defined(TARGET_ALPHA)
9440             target_ulong sigsetsize = arg4;
9441             target_ulong restorer = arg5;
9442 #elif defined(TARGET_SPARC)
9443             target_ulong restorer = arg4;
9444             target_ulong sigsetsize = arg5;
9445 #else
9446             target_ulong sigsetsize = arg4;
9447             target_ulong restorer = 0;
9448 #endif
9449             struct target_sigaction *act = NULL;
9450             struct target_sigaction *oact = NULL;
9451 
9452             if (sigsetsize != sizeof(target_sigset_t)) {
9453                 return -TARGET_EINVAL;
9454             }
9455             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9456                 return -TARGET_EFAULT;
9457             }
9458             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9459                 ret = -TARGET_EFAULT;
9460             } else {
9461                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9462                 if (oact) {
9463                     unlock_user_struct(oact, arg3, 1);
9464                 }
9465             }
9466             if (act) {
9467                 unlock_user_struct(act, arg2, 0);
9468             }
9469         }
9470         return ret;
9471 #ifdef TARGET_NR_sgetmask /* not on alpha */
9472     case TARGET_NR_sgetmask:
9473         {
9474             sigset_t cur_set;
9475             abi_ulong target_set;
9476             ret = do_sigprocmask(0, NULL, &cur_set);
9477             if (!ret) {
9478                 host_to_target_old_sigset(&target_set, &cur_set);
9479                 ret = target_set;
9480             }
9481         }
9482         return ret;
9483 #endif
9484 #ifdef TARGET_NR_ssetmask /* not on alpha */
9485     case TARGET_NR_ssetmask:
9486         {
9487             sigset_t set, oset;
9488             abi_ulong target_set = arg1;
9489             target_to_host_old_sigset(&set, &target_set);
9490             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9491             if (!ret) {
9492                 host_to_target_old_sigset(&target_set, &oset);
9493                 ret = target_set;
9494             }
9495         }
9496         return ret;
9497 #endif
9498 #ifdef TARGET_NR_sigprocmask
9499     case TARGET_NR_sigprocmask:
9500         {
9501 #if defined(TARGET_ALPHA)
9502             sigset_t set, oldset;
9503             abi_ulong mask;
9504             int how;
9505 
9506             switch (arg1) {
9507             case TARGET_SIG_BLOCK:
9508                 how = SIG_BLOCK;
9509                 break;
9510             case TARGET_SIG_UNBLOCK:
9511                 how = SIG_UNBLOCK;
9512                 break;
9513             case TARGET_SIG_SETMASK:
9514                 how = SIG_SETMASK;
9515                 break;
9516             default:
9517                 return -TARGET_EINVAL;
9518             }
9519             mask = arg2;
9520             target_to_host_old_sigset(&set, &mask);
9521 
9522             ret = do_sigprocmask(how, &set, &oldset);
9523             if (!is_error(ret)) {
9524                 host_to_target_old_sigset(&mask, &oldset);
9525                 ret = mask;
9526                 cpu_env->ir[IR_V0] = 0; /* force no error */
9527             }
9528 #else
9529             sigset_t set, oldset, *set_ptr;
9530             int how;
9531 
9532             if (arg2) {
9533                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9534                 if (!p) {
9535                     return -TARGET_EFAULT;
9536                 }
9537                 target_to_host_old_sigset(&set, p);
9538                 unlock_user(p, arg2, 0);
9539                 set_ptr = &set;
9540                 switch (arg1) {
9541                 case TARGET_SIG_BLOCK:
9542                     how = SIG_BLOCK;
9543                     break;
9544                 case TARGET_SIG_UNBLOCK:
9545                     how = SIG_UNBLOCK;
9546                     break;
9547                 case TARGET_SIG_SETMASK:
9548                     how = SIG_SETMASK;
9549                     break;
9550                 default:
9551                     return -TARGET_EINVAL;
9552                 }
9553             } else {
9554                 how = 0;
9555                 set_ptr = NULL;
9556             }
9557             ret = do_sigprocmask(how, set_ptr, &oldset);
9558             if (!is_error(ret) && arg3) {
9559                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9560                     return -TARGET_EFAULT;
9561                 host_to_target_old_sigset(p, &oldset);
9562                 unlock_user(p, arg3, sizeof(target_sigset_t));
9563             }
9564 #endif
9565         }
9566         return ret;
9567 #endif
9568     case TARGET_NR_rt_sigprocmask:
9569         {
9570             int how = arg1;
9571             sigset_t set, oldset, *set_ptr;
9572 
9573             if (arg4 != sizeof(target_sigset_t)) {
9574                 return -TARGET_EINVAL;
9575             }
9576 
9577             if (arg2) {
9578                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9579                 if (!p) {
9580                     return -TARGET_EFAULT;
9581                 }
9582                 target_to_host_sigset(&set, p);
9583                 unlock_user(p, arg2, 0);
9584                 set_ptr = &set;
9585                 switch(how) {
9586                 case TARGET_SIG_BLOCK:
9587                     how = SIG_BLOCK;
9588                     break;
9589                 case TARGET_SIG_UNBLOCK:
9590                     how = SIG_UNBLOCK;
9591                     break;
9592                 case TARGET_SIG_SETMASK:
9593                     how = SIG_SETMASK;
9594                     break;
9595                 default:
9596                     return -TARGET_EINVAL;
9597                 }
9598             } else {
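                /* With no new set, 'how' is ignored and only the old mask is fetched. */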
9599                 how = 0;
9600                 set_ptr = NULL;
9601             }
9602             ret = do_sigprocmask(how, set_ptr, &oldset);
9603             if (!is_error(ret) && arg3) {
9604                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9605                     return -TARGET_EFAULT;
9606                 host_to_target_sigset(p, &oldset);
9607                 unlock_user(p, arg3, sizeof(target_sigset_t));
9608             }
9609         }
9610         return ret;
9611 #ifdef TARGET_NR_sigpending
9612     case TARGET_NR_sigpending:
9613         {
9614             sigset_t set;
9615             ret = get_errno(sigpending(&set));
9616             if (!is_error(ret)) {
9617                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9618                     return -TARGET_EFAULT;
9619                 host_to_target_old_sigset(p, &set);
9620                 unlock_user(p, arg1, sizeof(target_sigset_t));
9621             }
9622         }
9623         return ret;
9624 #endif
9625     case TARGET_NR_rt_sigpending:
9626         {
9627             sigset_t set;
9628 
9629             /* Yes, this check is >, not != like most. We follow the
9630              * kernel's logic here: it implements NR_sigpending through
9631              * the same code path, and in that case the old_sigset_t is
9632              * smaller in size.
9633              */
9634             if (arg2 > sizeof(target_sigset_t)) {
9635                 return -TARGET_EINVAL;
9636             }
9637 
9638             ret = get_errno(sigpending(&set));
9639             if (!is_error(ret)) {
9640                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9641                     return -TARGET_EFAULT;
9642                 host_to_target_sigset(p, &set);
9643                 unlock_user(p, arg1, sizeof(target_sigset_t));
9644             }
9645         }
9646         return ret;
9647 #ifdef TARGET_NR_sigsuspend
9648     case TARGET_NR_sigsuspend:
9649         {
9650             sigset_t *set;
9651 
9652 #if defined(TARGET_ALPHA)
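            /*
             * Alpha's sigsuspend passes the signal mask by value in the
             * first argument rather than via a pointer, so it is converted
             * directly here instead of going through
             * process_sigsuspend_mask().
             */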
9653             TaskState *ts = cpu->opaque;
9654             /* target_to_host_old_sigset will bswap back */
9655             abi_ulong mask = tswapal(arg1);
9656             set = &ts->sigsuspend_mask;
9657             target_to_host_old_sigset(set, &mask);
9658 #else
9659             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9660             if (ret != 0) {
9661                 return ret;
9662             }
9663 #endif
9664             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9665             finish_sigsuspend_mask(ret);
9666         }
9667         return ret;
9668 #endif
9669     case TARGET_NR_rt_sigsuspend:
9670         {
9671             sigset_t *set;
9672 
9673             ret = process_sigsuspend_mask(&set, arg1, arg2);
9674             if (ret != 0) {
9675                 return ret;
9676             }
9677             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9678             finish_sigsuspend_mask(ret);
9679         }
9680         return ret;
9681 #ifdef TARGET_NR_rt_sigtimedwait
9682     case TARGET_NR_rt_sigtimedwait:
9683         {
9684             sigset_t set;
9685             struct timespec uts, *puts;
9686             siginfo_t uinfo;
9687 
9688             if (arg4 != sizeof(target_sigset_t)) {
9689                 return -TARGET_EINVAL;
9690             }
9691 
9692             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9693                 return -TARGET_EFAULT;
9694             target_to_host_sigset(&set, p);
9695             unlock_user(p, arg1, 0);
9696             if (arg3) {
9697                 puts = &uts;
9698                 if (target_to_host_timespec(puts, arg3)) {
9699                     return -TARGET_EFAULT;
9700                 }
9701             } else {
9702                 puts = NULL;
9703             }
9704             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9705                                                  SIGSET_T_SIZE));
9706             if (!is_error(ret)) {
9707                 if (arg2) {
9708                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9709                                   0);
9710                     if (!p) {
9711                         return -TARGET_EFAULT;
9712                     }
9713                     host_to_target_siginfo(p, &uinfo);
9714                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9715                 }
9716                 ret = host_to_target_signal(ret);
9717             }
9718         }
9719         return ret;
9720 #endif
9721 #ifdef TARGET_NR_rt_sigtimedwait_time64
9722     case TARGET_NR_rt_sigtimedwait_time64:
9723         {
9724             sigset_t set;
9725             struct timespec uts, *puts;
9726             siginfo_t uinfo;
9727 
9728             if (arg4 != sizeof(target_sigset_t)) {
9729                 return -TARGET_EINVAL;
9730             }
9731 
9732             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9733             if (!p) {
9734                 return -TARGET_EFAULT;
9735             }
9736             target_to_host_sigset(&set, p);
9737             unlock_user(p, arg1, 0);
9738             if (arg3) {
9739                 puts = &uts;
9740                 if (target_to_host_timespec64(puts, arg3)) {
9741                     return -TARGET_EFAULT;
9742                 }
9743             } else {
9744                 puts = NULL;
9745             }
9746             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9747                                                  SIGSET_T_SIZE));
9748             if (!is_error(ret)) {
9749                 if (arg2) {
9750                     p = lock_user(VERIFY_WRITE, arg2,
9751                                   sizeof(target_siginfo_t), 0);
9752                     if (!p) {
9753                         return -TARGET_EFAULT;
9754                     }
9755                     host_to_target_siginfo(p, &uinfo);
9756                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9757                 }
9758                 ret = host_to_target_signal(ret);
9759             }
9760         }
9761         return ret;
9762 #endif
9763     case TARGET_NR_rt_sigqueueinfo:
9764         {
9765             siginfo_t uinfo;
9766 
9767             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9768             if (!p) {
9769                 return -TARGET_EFAULT;
9770             }
9771             target_to_host_siginfo(&uinfo, p);
9772             unlock_user(p, arg3, 0);
9773             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9774         }
9775         return ret;
9776     case TARGET_NR_rt_tgsigqueueinfo:
9777         {
9778             siginfo_t uinfo;
9779 
9780             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9781             if (!p) {
9782                 return -TARGET_EFAULT;
9783             }
9784             target_to_host_siginfo(&uinfo, p);
9785             unlock_user(p, arg4, 0);
9786             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9787         }
9788         return ret;
9789 #ifdef TARGET_NR_sigreturn
9790     case TARGET_NR_sigreturn:
9791         if (block_signals()) {
9792             return -QEMU_ERESTARTSYS;
9793         }
9794         return do_sigreturn(cpu_env);
9795 #endif
9796     case TARGET_NR_rt_sigreturn:
9797         if (block_signals()) {
9798             return -QEMU_ERESTARTSYS;
9799         }
9800         return do_rt_sigreturn(cpu_env);
9801     case TARGET_NR_sethostname:
9802         if (!(p = lock_user_string(arg1)))
9803             return -TARGET_EFAULT;
9804         ret = get_errno(sethostname(p, arg2));
9805         unlock_user(p, arg1, 0);
9806         return ret;
9807 #ifdef TARGET_NR_setrlimit
9808     case TARGET_NR_setrlimit:
9809         {
9810             int resource = target_to_host_resource(arg1);
9811             struct target_rlimit *target_rlim;
9812             struct rlimit rlim;
9813             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9814                 return -TARGET_EFAULT;
9815             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9816             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9817             unlock_user_struct(target_rlim, arg2, 0);
9818             /*
9819              * If we just passed through resource limit settings for memory then
9820              * they would also apply to QEMU's own allocations, and QEMU will
9821              * crash or hang or die if its allocations fail. Ideally we would
9822              * track the guest allocations in QEMU and apply the limits ourselves.
9823              * For now, just tell the guest the call succeeded but don't actually
9824              * limit anything.
9825              */
9826             if (resource != RLIMIT_AS &&
9827                 resource != RLIMIT_DATA &&
9828                 resource != RLIMIT_STACK) {
9829                 return get_errno(setrlimit(resource, &rlim));
9830             } else {
9831                 return 0;
9832             }
9833         }
9834 #endif
9835 #ifdef TARGET_NR_getrlimit
9836     case TARGET_NR_getrlimit:
9837         {
9838             int resource = target_to_host_resource(arg1);
9839             struct target_rlimit *target_rlim;
9840             struct rlimit rlim;
9841 
9842             ret = get_errno(getrlimit(resource, &rlim));
9843             if (!is_error(ret)) {
9844                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9845                     return -TARGET_EFAULT;
9846                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9847                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9848                 unlock_user_struct(target_rlim, arg2, 1);
9849             }
9850         }
9851         return ret;
9852 #endif
9853     case TARGET_NR_getrusage:
9854         {
9855             struct rusage rusage;
9856             ret = get_errno(getrusage(arg1, &rusage));
9857             if (!is_error(ret)) {
9858                 ret = host_to_target_rusage(arg2, &rusage);
9859             }
9860         }
9861         return ret;
9862 #if defined(TARGET_NR_gettimeofday)
9863     case TARGET_NR_gettimeofday:
9864         {
9865             struct timeval tv;
9866             struct timezone tz;
9867 
9868             ret = get_errno(gettimeofday(&tv, &tz));
9869             if (!is_error(ret)) {
9870                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9871                     return -TARGET_EFAULT;
9872                 }
9873                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9874                     return -TARGET_EFAULT;
9875                 }
9876             }
9877         }
9878         return ret;
9879 #endif
9880 #if defined(TARGET_NR_settimeofday)
9881     case TARGET_NR_settimeofday:
9882         {
9883             struct timeval tv, *ptv = NULL;
9884             struct timezone tz, *ptz = NULL;
9885 
9886             if (arg1) {
9887                 if (copy_from_user_timeval(&tv, arg1)) {
9888                     return -TARGET_EFAULT;
9889                 }
9890                 ptv = &tv;
9891             }
9892 
9893             if (arg2) {
9894                 if (copy_from_user_timezone(&tz, arg2)) {
9895                     return -TARGET_EFAULT;
9896                 }
9897                 ptz = &tz;
9898             }
9899 
9900             return get_errno(settimeofday(ptv, ptz));
9901         }
9902 #endif
9903 #if defined(TARGET_NR_select)
9904     case TARGET_NR_select:
9905 #if defined(TARGET_WANT_NI_OLD_SELECT)
9906         /* Some architectures used to have old_select here
9907          * but now simply return ENOSYS for it.
9908          */
9909         ret = -TARGET_ENOSYS;
9910 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9911         ret = do_old_select(arg1);
9912 #else
9913         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9914 #endif
9915         return ret;
9916 #endif
9917 #ifdef TARGET_NR_pselect6
9918     case TARGET_NR_pselect6:
9919         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9920 #endif
9921 #ifdef TARGET_NR_pselect6_time64
9922     case TARGET_NR_pselect6_time64:
9923         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9924 #endif
9925 #ifdef TARGET_NR_symlink
9926     case TARGET_NR_symlink:
9927         {
9928             void *p2;
9929             p = lock_user_string(arg1);
9930             p2 = lock_user_string(arg2);
9931             if (!p || !p2)
9932                 ret = -TARGET_EFAULT;
9933             else
9934                 ret = get_errno(symlink(p, p2));
9935             unlock_user(p2, arg2, 0);
9936             unlock_user(p, arg1, 0);
9937         }
9938         return ret;
9939 #endif
9940 #if defined(TARGET_NR_symlinkat)
9941     case TARGET_NR_symlinkat:
9942         {
9943             void *p2;
9944             p  = lock_user_string(arg1);
9945             p2 = lock_user_string(arg3);
9946             if (!p || !p2)
9947                 ret = -TARGET_EFAULT;
9948             else
9949                 ret = get_errno(symlinkat(p, arg2, p2));
9950             unlock_user(p2, arg3, 0);
9951             unlock_user(p, arg1, 0);
9952         }
9953         return ret;
9954 #endif
9955 #ifdef TARGET_NR_readlink
9956     case TARGET_NR_readlink:
9957         {
9958             void *p2;
9959             p = lock_user_string(arg1);
9960             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9961             if (!p || !p2) {
9962                 ret = -TARGET_EFAULT;
9963             } else if (!arg3) {
9964                 /* Short circuit this for the magic exe check. */
9965                 ret = -TARGET_EINVAL;
9966             } else if (is_proc_myself((const char *)p, "exe")) {
9967                 char real[PATH_MAX], *temp;
9968                 temp = realpath(exec_path, real);
9969                 /* Return value is # of bytes that we wrote to the buffer. */
9970                 if (temp == NULL) {
9971                     ret = get_errno(-1);
9972                 } else {
9973                     /* Don't worry about sign mismatch as earlier mapping
9974                      * logic would have thrown a bad address error. */
9975                     ret = MIN(strlen(real), arg3);
9976                     /* We cannot NUL terminate the string. */
9977                     memcpy(p2, real, ret);
9978                 }
9979             } else {
9980                 ret = get_errno(readlink(path(p), p2, arg3));
9981             }
9982             unlock_user(p2, arg2, ret);
9983             unlock_user(p, arg1, 0);
9984         }
9985         return ret;
9986 #endif
9987 #if defined(TARGET_NR_readlinkat)
9988     case TARGET_NR_readlinkat:
9989         {
9990             void *p2;
9991             p  = lock_user_string(arg2);
9992             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9993             if (!p || !p2) {
9994                 ret = -TARGET_EFAULT;
9995             } else if (!arg4) {
9996                 /* Short circuit this for the magic exe check. */
9997                 ret = -TARGET_EINVAL;
9998             } else if (is_proc_myself((const char *)p, "exe")) {
9999                 char real[PATH_MAX], *temp;
10000                 temp = realpath(exec_path, real);
10001                 /* Return value is # of bytes that we wrote to the buffer. */
10002                 if (temp == NULL) {
10003                     ret = get_errno(-1);
10004                 } else {
10005                     /* Don't worry about sign mismatch as earlier mapping
10006                      * logic would have thrown a bad address error. */
10007                     ret = MIN(strlen(real), arg4);
10008                     /* We cannot NUL terminate the string. */
10009                     memcpy(p2, real, ret);
10010                 }
10011             } else {
10012                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10013             }
10014             unlock_user(p2, arg3, ret);
10015             unlock_user(p, arg2, 0);
10016         }
10017         return ret;
10018 #endif
10019 #ifdef TARGET_NR_swapon
10020     case TARGET_NR_swapon:
10021         if (!(p = lock_user_string(arg1)))
10022             return -TARGET_EFAULT;
10023         ret = get_errno(swapon(p, arg2));
10024         unlock_user(p, arg1, 0);
10025         return ret;
10026 #endif
10027     case TARGET_NR_reboot:
10028         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10029            /* arg4 is only used for RESTART2; ignore it in all other cases */
10030            p = lock_user_string(arg4);
10031            if (!p) {
10032                return -TARGET_EFAULT;
10033            }
10034            ret = get_errno(reboot(arg1, arg2, arg3, p));
10035            unlock_user(p, arg4, 0);
10036         } else {
10037            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10038         }
10039         return ret;
10040 #ifdef TARGET_NR_mmap
10041     case TARGET_NR_mmap:
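        /*
         * On these 32-bit ABIs the old mmap syscall takes a pointer to a
         * block of six arguments in guest memory rather than passing them
         * in registers, so the block is read and byte-swapped before
         * calling target_mmap().
         */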
10042 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10043     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10044     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10045     || defined(TARGET_S390X)
10046         {
10047             abi_ulong *v;
10048             abi_ulong v1, v2, v3, v4, v5, v6;
10049             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10050                 return -TARGET_EFAULT;
10051             v1 = tswapal(v[0]);
10052             v2 = tswapal(v[1]);
10053             v3 = tswapal(v[2]);
10054             v4 = tswapal(v[3]);
10055             v5 = tswapal(v[4]);
10056             v6 = tswapal(v[5]);
10057             unlock_user(v, arg1, 0);
10058             ret = get_errno(target_mmap(v1, v2, v3,
10059                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10060                                         v5, v6));
10061         }
10062 #else
10063         /* mmap pointers are always untagged */
10064         ret = get_errno(target_mmap(arg1, arg2, arg3,
10065                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10066                                     arg5,
10067                                     arg6));
10068 #endif
10069         return ret;
10070 #endif
10071 #ifdef TARGET_NR_mmap2
10072     case TARGET_NR_mmap2:
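        /*
         * mmap2 passes its file offset in units of (1 << MMAP_SHIFT)-byte
         * pages (4096 bytes unless the target overrides MMAP_SHIFT), so
         * arg6 is scaled up to a byte offset before calling target_mmap().
         */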
10073 #ifndef MMAP_SHIFT
10074 #define MMAP_SHIFT 12
10075 #endif
10076         ret = target_mmap(arg1, arg2, arg3,
10077                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10078                           arg5, arg6 << MMAP_SHIFT);
10079         return get_errno(ret);
10080 #endif
10081     case TARGET_NR_munmap:
10082         arg1 = cpu_untagged_addr(cpu, arg1);
10083         return get_errno(target_munmap(arg1, arg2));
10084     case TARGET_NR_mprotect:
10085         arg1 = cpu_untagged_addr(cpu, arg1);
10086         {
10087             TaskState *ts = cpu->opaque;
10088             /* Special hack to detect libc making the stack executable.  */
10089             if ((arg3 & PROT_GROWSDOWN)
10090                 && arg1 >= ts->info->stack_limit
10091                 && arg1 <= ts->info->start_stack) {
10092                 arg3 &= ~PROT_GROWSDOWN;
10093                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10094                 arg1 = ts->info->stack_limit;
10095             }
10096         }
10097         return get_errno(target_mprotect(arg1, arg2, arg3));
10098 #ifdef TARGET_NR_mremap
10099     case TARGET_NR_mremap:
10100         arg1 = cpu_untagged_addr(cpu, arg1);
10101         /* mremap new_addr (arg5) is always untagged */
10102         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10103 #endif
10104         /* ??? msync/mlock/munlock are broken for softmmu.  */
10105 #ifdef TARGET_NR_msync
10106     case TARGET_NR_msync:
10107         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10108 #endif
10109 #ifdef TARGET_NR_mlock
10110     case TARGET_NR_mlock:
10111         return get_errno(mlock(g2h(cpu, arg1), arg2));
10112 #endif
10113 #ifdef TARGET_NR_munlock
10114     case TARGET_NR_munlock:
10115         return get_errno(munlock(g2h(cpu, arg1), arg2));
10116 #endif
10117 #ifdef TARGET_NR_mlockall
10118     case TARGET_NR_mlockall:
10119         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10120 #endif
10121 #ifdef TARGET_NR_munlockall
10122     case TARGET_NR_munlockall:
10123         return get_errno(munlockall());
10124 #endif
10125 #ifdef TARGET_NR_truncate
10126     case TARGET_NR_truncate:
10127         if (!(p = lock_user_string(arg1)))
10128             return -TARGET_EFAULT;
10129         ret = get_errno(truncate(p, arg2));
10130         unlock_user(p, arg1, 0);
10131         return ret;
10132 #endif
10133 #ifdef TARGET_NR_ftruncate
10134     case TARGET_NR_ftruncate:
10135         return get_errno(ftruncate(arg1, arg2));
10136 #endif
10137     case TARGET_NR_fchmod:
10138         return get_errno(fchmod(arg1, arg2));
10139 #if defined(TARGET_NR_fchmodat)
10140     case TARGET_NR_fchmodat:
10141         if (!(p = lock_user_string(arg2)))
10142             return -TARGET_EFAULT;
10143         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10144         unlock_user(p, arg2, 0);
10145         return ret;
10146 #endif
10147     case TARGET_NR_getpriority:
10148         /* Note that negative values are valid for getpriority, so we must
10149            differentiate based on errno settings.  */
10150         errno = 0;
10151         ret = getpriority(arg1, arg2);
10152         if (ret == -1 && errno != 0) {
10153             return -host_to_target_errno(errno);
10154         }
10155 #ifdef TARGET_ALPHA
10156         /* Return value is the unbiased priority.  Signal no error.  */
10157         cpu_env->ir[IR_V0] = 0;
10158 #else
10159         /* Return value is a biased priority to avoid negative numbers.  */
10160         ret = 20 - ret;
10161 #endif
10162         return ret;
10163     case TARGET_NR_setpriority:
10164         return get_errno(setpriority(arg1, arg2, arg3));
10165 #ifdef TARGET_NR_statfs
10166     case TARGET_NR_statfs:
10167         if (!(p = lock_user_string(arg1))) {
10168             return -TARGET_EFAULT;
10169         }
10170         ret = get_errno(statfs(path(p), &stfs));
10171         unlock_user(p, arg1, 0);
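    /* TARGET_NR_fstatfs (below) jumps here to reuse the statfs conversion. */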
10172     convert_statfs:
10173         if (!is_error(ret)) {
10174             struct target_statfs *target_stfs;
10175 
10176             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10177                 return -TARGET_EFAULT;
10178             __put_user(stfs.f_type, &target_stfs->f_type);
10179             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10180             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10181             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10182             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10183             __put_user(stfs.f_files, &target_stfs->f_files);
10184             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10185             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10186             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10187             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10188             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10189 #ifdef _STATFS_F_FLAGS
10190             __put_user(stfs.f_flags, &target_stfs->f_flags);
10191 #else
10192             __put_user(0, &target_stfs->f_flags);
10193 #endif
10194             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10195             unlock_user_struct(target_stfs, arg2, 1);
10196         }
10197         return ret;
10198 #endif
10199 #ifdef TARGET_NR_fstatfs
10200     case TARGET_NR_fstatfs:
10201         ret = get_errno(fstatfs(arg1, &stfs));
10202         goto convert_statfs;
10203 #endif
10204 #ifdef TARGET_NR_statfs64
10205     case TARGET_NR_statfs64:
10206         if (!(p = lock_user_string(arg1))) {
10207             return -TARGET_EFAULT;
10208         }
10209         ret = get_errno(statfs(path(p), &stfs));
10210         unlock_user(p, arg1, 0);
10211     convert_statfs64:
10212         if (!is_error(ret)) {
10213             struct target_statfs64 *target_stfs;
10214 
10215             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10216                 return -TARGET_EFAULT;
10217             __put_user(stfs.f_type, &target_stfs->f_type);
10218             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10219             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10220             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10221             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10222             __put_user(stfs.f_files, &target_stfs->f_files);
10223             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10224             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10225             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10226             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10227             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10228 #ifdef _STATFS_F_FLAGS
10229             __put_user(stfs.f_flags, &target_stfs->f_flags);
10230 #else
10231             __put_user(0, &target_stfs->f_flags);
10232 #endif
10233             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10234             unlock_user_struct(target_stfs, arg3, 1);
10235         }
10236         return ret;
10237     case TARGET_NR_fstatfs64:
10238         ret = get_errno(fstatfs(arg1, &stfs));
10239         goto convert_statfs64;
10240 #endif
10241 #ifdef TARGET_NR_socketcall
10242     case TARGET_NR_socketcall:
10243         return do_socketcall(arg1, arg2);
10244 #endif
10245 #ifdef TARGET_NR_accept
10246     case TARGET_NR_accept:
10247         return do_accept4(arg1, arg2, arg3, 0);
10248 #endif
10249 #ifdef TARGET_NR_accept4
10250     case TARGET_NR_accept4:
10251         return do_accept4(arg1, arg2, arg3, arg4);
10252 #endif
10253 #ifdef TARGET_NR_bind
10254     case TARGET_NR_bind:
10255         return do_bind(arg1, arg2, arg3);
10256 #endif
10257 #ifdef TARGET_NR_connect
10258     case TARGET_NR_connect:
10259         return do_connect(arg1, arg2, arg3);
10260 #endif
10261 #ifdef TARGET_NR_getpeername
10262     case TARGET_NR_getpeername:
10263         return do_getpeername(arg1, arg2, arg3);
10264 #endif
10265 #ifdef TARGET_NR_getsockname
10266     case TARGET_NR_getsockname:
10267         return do_getsockname(arg1, arg2, arg3);
10268 #endif
10269 #ifdef TARGET_NR_getsockopt
10270     case TARGET_NR_getsockopt:
10271         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10272 #endif
10273 #ifdef TARGET_NR_listen
10274     case TARGET_NR_listen:
10275         return get_errno(listen(arg1, arg2));
10276 #endif
10277 #ifdef TARGET_NR_recv
10278     case TARGET_NR_recv:
10279         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10280 #endif
10281 #ifdef TARGET_NR_recvfrom
10282     case TARGET_NR_recvfrom:
10283         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10284 #endif
10285 #ifdef TARGET_NR_recvmsg
10286     case TARGET_NR_recvmsg:
10287         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10288 #endif
10289 #ifdef TARGET_NR_send
10290     case TARGET_NR_send:
10291         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10292 #endif
10293 #ifdef TARGET_NR_sendmsg
10294     case TARGET_NR_sendmsg:
10295         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10296 #endif
10297 #ifdef TARGET_NR_sendmmsg
10298     case TARGET_NR_sendmmsg:
10299         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10300 #endif
10301 #ifdef TARGET_NR_recvmmsg
10302     case TARGET_NR_recvmmsg:
10303         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10304 #endif
10305 #ifdef TARGET_NR_sendto
10306     case TARGET_NR_sendto:
10307         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10308 #endif
10309 #ifdef TARGET_NR_shutdown
10310     case TARGET_NR_shutdown:
10311         return get_errno(shutdown(arg1, arg2));
10312 #endif
10313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10314     case TARGET_NR_getrandom:
10315         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10316         if (!p) {
10317             return -TARGET_EFAULT;
10318         }
10319         ret = get_errno(getrandom(p, arg2, arg3));
10320         unlock_user(p, arg1, ret);
10321         return ret;
10322 #endif
10323 #ifdef TARGET_NR_socket
10324     case TARGET_NR_socket:
10325         return do_socket(arg1, arg2, arg3);
10326 #endif
10327 #ifdef TARGET_NR_socketpair
10328     case TARGET_NR_socketpair:
10329         return do_socketpair(arg1, arg2, arg3, arg4);
10330 #endif
10331 #ifdef TARGET_NR_setsockopt
10332     case TARGET_NR_setsockopt:
10333         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10334 #endif
10335 #if defined(TARGET_NR_syslog)
10336     case TARGET_NR_syslog:
10337         {
10338             int len = arg3;
10339 
10340             switch (arg1) {
10341             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10342             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10343             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10344             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10345             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10346             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10347             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10348             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10349                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10350             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10351             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10352             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10353                 {
10354                     if (len < 0) {
10355                         return -TARGET_EINVAL;
10356                     }
10357                     if (len == 0) {
10358                         return 0;
10359                     }
10360                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10361                     if (!p) {
10362                         return -TARGET_EFAULT;
10363                     }
10364                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10365                     unlock_user(p, arg2, arg3);
10366                 }
10367                 return ret;
10368             default:
10369                 return -TARGET_EINVAL;
10370             }
10371         }
10372         break;
10373 #endif
10374     case TARGET_NR_setitimer:
10375         {
10376             struct itimerval value, ovalue, *pvalue;
10377 
10378             if (arg2) {
10379                 pvalue = &value;
10380                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10381                     || copy_from_user_timeval(&pvalue->it_value,
10382                                               arg2 + sizeof(struct target_timeval)))
10383                     return -TARGET_EFAULT;
10384             } else {
10385                 pvalue = NULL;
10386             }
10387             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10388             if (!is_error(ret) && arg3) {
10389                 if (copy_to_user_timeval(arg3,
10390                                          &ovalue.it_interval)
10391                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10392                                             &ovalue.it_value))
10393                     return -TARGET_EFAULT;
10394             }
10395         }
10396         return ret;
10397     case TARGET_NR_getitimer:
10398         {
10399             struct itimerval value;
10400 
10401             ret = get_errno(getitimer(arg1, &value));
10402             if (!is_error(ret) && arg2) {
10403                 if (copy_to_user_timeval(arg2,
10404                                          &value.it_interval)
10405                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10406                                             &value.it_value))
10407                     return -TARGET_EFAULT;
10408             }
10409         }
10410         return ret;
10411 #ifdef TARGET_NR_stat
10412     case TARGET_NR_stat:
10413         if (!(p = lock_user_string(arg1))) {
10414             return -TARGET_EFAULT;
10415         }
10416         ret = get_errno(stat(path(p), &st));
10417         unlock_user(p, arg1, 0);
10418         goto do_stat;
10419 #endif
10420 #ifdef TARGET_NR_lstat
10421     case TARGET_NR_lstat:
10422         if (!(p = lock_user_string(arg1))) {
10423             return -TARGET_EFAULT;
10424         }
10425         ret = get_errno(lstat(path(p), &st));
10426         unlock_user(p, arg1, 0);
10427         goto do_stat;
10428 #endif
10429 #ifdef TARGET_NR_fstat
10430     case TARGET_NR_fstat:
10431         {
10432             ret = get_errno(fstat(arg1, &st));
10433 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10434         do_stat:
10435 #endif
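            /*
             * TARGET_NR_stat and TARGET_NR_lstat jump to do_stat above so
             * that all three calls share this conversion into the target's
             * struct stat layout.
             */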
10436             if (!is_error(ret)) {
10437                 struct target_stat *target_st;
10438 
10439                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10440                     return -TARGET_EFAULT;
10441                 memset(target_st, 0, sizeof(*target_st));
10442                 __put_user(st.st_dev, &target_st->st_dev);
10443                 __put_user(st.st_ino, &target_st->st_ino);
10444                 __put_user(st.st_mode, &target_st->st_mode);
10445                 __put_user(st.st_uid, &target_st->st_uid);
10446                 __put_user(st.st_gid, &target_st->st_gid);
10447                 __put_user(st.st_nlink, &target_st->st_nlink);
10448                 __put_user(st.st_rdev, &target_st->st_rdev);
10449                 __put_user(st.st_size, &target_st->st_size);
10450                 __put_user(st.st_blksize, &target_st->st_blksize);
10451                 __put_user(st.st_blocks, &target_st->st_blocks);
10452                 __put_user(st.st_atime, &target_st->target_st_atime);
10453                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10454                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10455 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10456                 __put_user(st.st_atim.tv_nsec,
10457                            &target_st->target_st_atime_nsec);
10458                 __put_user(st.st_mtim.tv_nsec,
10459                            &target_st->target_st_mtime_nsec);
10460                 __put_user(st.st_ctim.tv_nsec,
10461                            &target_st->target_st_ctime_nsec);
10462 #endif
10463                 unlock_user_struct(target_st, arg2, 1);
10464             }
10465         }
10466         return ret;
10467 #endif
10468     case TARGET_NR_vhangup:
10469         return get_errno(vhangup());
10470 #ifdef TARGET_NR_syscall
10471     case TARGET_NR_syscall:
10472         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10473                           arg6, arg7, arg8, 0);
10474 #endif
10475 #if defined(TARGET_NR_wait4)
10476     case TARGET_NR_wait4:
10477         {
10478             int status;
10479             abi_long status_ptr = arg2;
10480             struct rusage rusage, *rusage_ptr;
10481             abi_ulong target_rusage = arg4;
10482             abi_long rusage_err;
10483             if (target_rusage)
10484                 rusage_ptr = &rusage;
10485             else
10486                 rusage_ptr = NULL;
10487             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10488             if (!is_error(ret)) {
10489                 if (status_ptr && ret) {
10490                     status = host_to_target_waitstatus(status);
10491                     if (put_user_s32(status, status_ptr))
10492                         return -TARGET_EFAULT;
10493                 }
10494                 if (target_rusage) {
10495                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10496                     if (rusage_err) {
10497                         ret = rusage_err;
10498                     }
10499                 }
10500             }
10501         }
10502         return ret;
10503 #endif
10504 #ifdef TARGET_NR_swapoff
10505     case TARGET_NR_swapoff:
10506         if (!(p = lock_user_string(arg1)))
10507             return -TARGET_EFAULT;
10508         ret = get_errno(swapoff(p));
10509         unlock_user(p, arg1, 0);
10510         return ret;
10511 #endif
10512     case TARGET_NR_sysinfo:
10513         {
10514             struct target_sysinfo *target_value;
10515             struct sysinfo value;
10516             ret = get_errno(sysinfo(&value));
10517             if (!is_error(ret) && arg1)
10518             {
10519                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10520                     return -TARGET_EFAULT;
10521                 __put_user(value.uptime, &target_value->uptime);
10522                 __put_user(value.loads[0], &target_value->loads[0]);
10523                 __put_user(value.loads[1], &target_value->loads[1]);
10524                 __put_user(value.loads[2], &target_value->loads[2]);
10525                 __put_user(value.totalram, &target_value->totalram);
10526                 __put_user(value.freeram, &target_value->freeram);
10527                 __put_user(value.sharedram, &target_value->sharedram);
10528                 __put_user(value.bufferram, &target_value->bufferram);
10529                 __put_user(value.totalswap, &target_value->totalswap);
10530                 __put_user(value.freeswap, &target_value->freeswap);
10531                 __put_user(value.procs, &target_value->procs);
10532                 __put_user(value.totalhigh, &target_value->totalhigh);
10533                 __put_user(value.freehigh, &target_value->freehigh);
10534                 __put_user(value.mem_unit, &target_value->mem_unit);
10535                 unlock_user_struct(target_value, arg1, 1);
10536             }
10537         }
10538         return ret;
10539 #ifdef TARGET_NR_ipc
10540     case TARGET_NR_ipc:
10541         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10542 #endif
10543 #ifdef TARGET_NR_semget
10544     case TARGET_NR_semget:
10545         return get_errno(semget(arg1, arg2, arg3));
10546 #endif
10547 #ifdef TARGET_NR_semop
10548     case TARGET_NR_semop:
10549         return do_semtimedop(arg1, arg2, arg3, 0, false);
10550 #endif
10551 #ifdef TARGET_NR_semtimedop
10552     case TARGET_NR_semtimedop:
10553         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10554 #endif
10555 #ifdef TARGET_NR_semtimedop_time64
10556     case TARGET_NR_semtimedop_time64:
10557         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10558 #endif
10559 #ifdef TARGET_NR_semctl
10560     case TARGET_NR_semctl:
10561         return do_semctl(arg1, arg2, arg3, arg4);
10562 #endif
10563 #ifdef TARGET_NR_msgctl
10564     case TARGET_NR_msgctl:
10565         return do_msgctl(arg1, arg2, arg3);
10566 #endif
10567 #ifdef TARGET_NR_msgget
10568     case TARGET_NR_msgget:
10569         return get_errno(msgget(arg1, arg2));
10570 #endif
10571 #ifdef TARGET_NR_msgrcv
10572     case TARGET_NR_msgrcv:
10573         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10574 #endif
10575 #ifdef TARGET_NR_msgsnd
10576     case TARGET_NR_msgsnd:
10577         return do_msgsnd(arg1, arg2, arg3, arg4);
10578 #endif
10579 #ifdef TARGET_NR_shmget
10580     case TARGET_NR_shmget:
10581         return get_errno(shmget(arg1, arg2, arg3));
10582 #endif
10583 #ifdef TARGET_NR_shmctl
10584     case TARGET_NR_shmctl:
10585         return do_shmctl(arg1, arg2, arg3);
10586 #endif
10587 #ifdef TARGET_NR_shmat
10588     case TARGET_NR_shmat:
10589         return do_shmat(cpu_env, arg1, arg2, arg3);
10590 #endif
10591 #ifdef TARGET_NR_shmdt
10592     case TARGET_NR_shmdt:
10593         return do_shmdt(arg1);
10594 #endif
10595     case TARGET_NR_fsync:
10596         return get_errno(fsync(arg1));
10597     case TARGET_NR_clone:
10598         /* Linux manages to have three different orderings for its
10599          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10600          * match the kernel's CONFIG_CLONE_* settings.
10601          * Microblaze is further special in that it uses a sixth
10602          * implicit argument to clone for the TLS pointer.
10603          */
10604 #if defined(TARGET_MICROBLAZE)
10605         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10606 #elif defined(TARGET_CLONE_BACKWARDS)
10607         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10608 #elif defined(TARGET_CLONE_BACKWARDS2)
10609         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10610 #else
10611         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10612 #endif
10613         return ret;
10614 #ifdef __NR_exit_group
10615         /* new thread calls */
10616     case TARGET_NR_exit_group:
10617         preexit_cleanup(cpu_env, arg1);
10618         return get_errno(exit_group(arg1));
10619 #endif
10620     case TARGET_NR_setdomainname:
10621         if (!(p = lock_user_string(arg1)))
10622             return -TARGET_EFAULT;
10623         ret = get_errno(setdomainname(p, arg2));
10624         unlock_user(p, arg1, 0);
10625         return ret;
10626     case TARGET_NR_uname:
10627         /* no need to transcode because we use the linux syscall */
10628         {
10629             struct new_utsname * buf;
10630 
10631             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10632                 return -TARGET_EFAULT;
10633             ret = get_errno(sys_uname(buf));
10634             if (!is_error(ret)) {
10635                 /* Overwrite the native machine name with whatever is being
10636                    emulated. */
10637                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10638                           sizeof(buf->machine));
10639                 /* Allow the user to override the reported release.  */
10640                 if (qemu_uname_release && *qemu_uname_release) {
10641                     g_strlcpy(buf->release, qemu_uname_release,
10642                               sizeof(buf->release));
10643                 }
10644             }
10645             unlock_user_struct(buf, arg1, 1);
10646         }
10647         return ret;
10648 #ifdef TARGET_I386
10649     case TARGET_NR_modify_ldt:
10650         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10651 #if !defined(TARGET_X86_64)
10652     case TARGET_NR_vm86:
10653         return do_vm86(cpu_env, arg1, arg2);
10654 #endif
10655 #endif
10656 #if defined(TARGET_NR_adjtimex)
10657     case TARGET_NR_adjtimex:
10658         {
10659             struct timex host_buf;
10660 
10661             if (target_to_host_timex(&host_buf, arg1) != 0) {
10662                 return -TARGET_EFAULT;
10663             }
10664             ret = get_errno(adjtimex(&host_buf));
10665             if (!is_error(ret)) {
10666                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10667                     return -TARGET_EFAULT;
10668                 }
10669             }
10670         }
10671         return ret;
10672 #endif
10673 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10674     case TARGET_NR_clock_adjtime:
10675         {
10676             struct timex htx, *phtx = &htx;
10677 
10678             if (target_to_host_timex(phtx, arg2) != 0) {
10679                 return -TARGET_EFAULT;
10680             }
10681             ret = get_errno(clock_adjtime(arg1, phtx));
10682             if (!is_error(ret) && phtx) {
10683                 if (host_to_target_timex(arg2, phtx) != 0) {
10684                     return -TARGET_EFAULT;
10685                 }
10686             }
10687         }
10688         return ret;
10689 #endif
10690 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10691     case TARGET_NR_clock_adjtime64:
10692         {
10693             struct timex htx;
10694 
10695             if (target_to_host_timex64(&htx, arg2) != 0) {
10696                 return -TARGET_EFAULT;
10697             }
10698             ret = get_errno(clock_adjtime(arg1, &htx));
10699             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10700                 return -TARGET_EFAULT;
10701             }
10702         }
10703         return ret;
10704 #endif
10705     case TARGET_NR_getpgid:
10706         return get_errno(getpgid(arg1));
10707     case TARGET_NR_fchdir:
10708         return get_errno(fchdir(arg1));
10709     case TARGET_NR_personality:
10710         return get_errno(personality(arg1));
10711 #ifdef TARGET_NR__llseek /* Not on alpha */
10712     case TARGET_NR__llseek:
10713         {
10714             int64_t res;
10715 #if !defined(__NR_llseek)
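                  /*
                   * Fallback for hosts without the _llseek syscall: rebuild
                   * the 64-bit offset from the guest's high (arg2) and low
                   * (arg3) halves and use plain lseek(); arg4 is the guest
                   * address that receives the 64-bit result and arg5 is the
                   * whence value.
                   */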
10716             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10717             if (res == -1) {
10718                 ret = get_errno(res);
10719             } else {
10720                 ret = 0;
10721             }
10722 #else
10723             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10724 #endif
10725             if ((ret == 0) && put_user_s64(res, arg4)) {
10726                 return -TARGET_EFAULT;
10727             }
10728         }
10729         return ret;
10730 #endif
10731 #ifdef TARGET_NR_getdents
10732     case TARGET_NR_getdents:
10733         return do_getdents(arg1, arg2, arg3);
10734 #endif /* TARGET_NR_getdents */
10735 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10736     case TARGET_NR_getdents64:
10737         return do_getdents64(arg1, arg2, arg3);
10738 #endif /* TARGET_NR_getdents64 */
10739 #if defined(TARGET_NR__newselect)
10740     case TARGET_NR__newselect:
10741         return do_select(arg1, arg2, arg3, arg4, arg5);
10742 #endif
10743 #ifdef TARGET_NR_poll
10744     case TARGET_NR_poll:
10745         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10746 #endif
10747 #ifdef TARGET_NR_ppoll
10748     case TARGET_NR_ppoll:
10749         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10750 #endif
10751 #ifdef TARGET_NR_ppoll_time64
10752     case TARGET_NR_ppoll_time64:
10753         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10754 #endif
10755     case TARGET_NR_flock:
10756         /* NOTE: the flock constant seems to be the same for every
10757            Linux platform */
10758         return get_errno(safe_flock(arg1, arg2));
10759     case TARGET_NR_readv:
10760         {
10761             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10762             if (vec != NULL) {
10763                 ret = get_errno(safe_readv(arg1, vec, arg3));
10764                 unlock_iovec(vec, arg2, arg3, 1);
10765             } else {
10766                 ret = -host_to_target_errno(errno);
10767             }
10768         }
10769         return ret;
10770     case TARGET_NR_writev:
10771         {
10772             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10773             if (vec != NULL) {
10774                 ret = get_errno(safe_writev(arg1, vec, arg3));
10775                 unlock_iovec(vec, arg2, arg3, 0);
10776             } else {
10777                 ret = -host_to_target_errno(errno);
10778             }
10779         }
10780         return ret;
10781 #if defined(TARGET_NR_preadv)
10782     case TARGET_NR_preadv:
10783         {
10784             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10785             if (vec != NULL) {
10786                 unsigned long low, high;
10787 
10788                 target_to_host_low_high(arg4, arg5, &low, &high);
10789                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10790                 unlock_iovec(vec, arg2, arg3, 1);
10791             } else {
10792                 ret = -host_to_target_errno(errno);
10793             }
10794         }
10795         return ret;
10796 #endif
10797 #if defined(TARGET_NR_pwritev)
10798     case TARGET_NR_pwritev:
10799         {
10800             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10801             if (vec != NULL) {
10802                 unsigned long low, high;
10803 
10804                 target_to_host_low_high(arg4, arg5, &low, &high);
10805                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10806                 unlock_iovec(vec, arg2, arg3, 0);
10807             } else {
10808                 ret = -host_to_target_errno(errno);
10809             }
10810         }
10811         return ret;
10812 #endif
10813     case TARGET_NR_getsid:
10814         return get_errno(getsid(arg1));
10815 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10816     case TARGET_NR_fdatasync:
10817         return get_errno(fdatasync(arg1));
10818 #endif
10819     case TARGET_NR_sched_getaffinity:
10820         {
10821             unsigned int mask_size;
10822             unsigned long *mask;
10823 
10824             /*
10825              * sched_getaffinity needs multiples of ulong, so need to take
10826              * care of mismatches between target ulong and host ulong sizes.
10827              */
10828             if (arg2 & (sizeof(abi_ulong) - 1)) {
10829                 return -TARGET_EINVAL;
10830             }
10831             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
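                  /*
                   * For example, a 12-byte request on a host with 8-byte
                   * longs rounds mask_size up to 16 so the host syscall
                   * always operates on whole unsigned longs.
                   */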
10832 
10833             mask = alloca(mask_size);
10834             memset(mask, 0, mask_size);
10835             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10836 
10837             if (!is_error(ret)) {
10838                 if (ret > arg2) {
10839                     /* More data returned than the caller's buffer will fit.
10840                      * This only happens if sizeof(abi_long) < sizeof(long)
10841                      * and the caller passed us a buffer holding an odd number
10842                      * of abi_longs. If the host kernel is actually using the
10843                      * extra 4 bytes then fail EINVAL; otherwise we can just
10844                      * ignore them and only copy the interesting part.
10845                      */
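                          /*
                           * Concretely: a 32-bit guest on a 64-bit host that
                           * passes a 12-byte buffer gets a 16-byte mask back;
                           * the trailing 4 bytes only matter if the host has
                           * more than 96 (arg2 * 8) CPUs configured.
                           */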
10846                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10847                     if (numcpus > arg2 * 8) {
10848                         return -TARGET_EINVAL;
10849                     }
10850                     ret = arg2;
10851                 }
10852 
10853                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10854                     return -TARGET_EFAULT;
10855                 }
10856             }
10857         }
10858         return ret;
10859     case TARGET_NR_sched_setaffinity:
10860         {
10861             unsigned int mask_size;
10862             unsigned long *mask;
10863 
10864             /*
10865              * sched_setaffinity needs multiples of ulong, so need to take
10866              * care of mismatches between target ulong and host ulong sizes.
10867              */
10868             if (arg2 & (sizeof(abi_ulong) - 1)) {
10869                 return -TARGET_EINVAL;
10870             }
10871             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10872             mask = alloca(mask_size);
10873 
10874             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10875             if (ret) {
10876                 return ret;
10877             }
10878 
10879             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10880         }
10881     case TARGET_NR_getcpu:
10882         {
10883             unsigned cpu, node;
10884             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10885                                        arg2 ? &node : NULL,
10886                                        NULL));
10887             if (is_error(ret)) {
10888                 return ret;
10889             }
10890             if (arg1 && put_user_u32(cpu, arg1)) {
10891                 return -TARGET_EFAULT;
10892             }
10893             if (arg2 && put_user_u32(node, arg2)) {
10894                 return -TARGET_EFAULT;
10895             }
10896         }
10897         return ret;
10898     case TARGET_NR_sched_setparam:
10899         {
10900             struct target_sched_param *target_schp;
10901             struct sched_param schp;
10902 
10903             if (arg2 == 0) {
10904                 return -TARGET_EINVAL;
10905             }
10906             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10907                 return -TARGET_EFAULT;
10908             }
10909             schp.sched_priority = tswap32(target_schp->sched_priority);
10910             unlock_user_struct(target_schp, arg2, 0);
10911             return get_errno(sys_sched_setparam(arg1, &schp));
10912         }
10913     case TARGET_NR_sched_getparam:
10914         {
10915             struct target_sched_param *target_schp;
10916             struct sched_param schp;
10917 
10918             if (arg2 == 0) {
10919                 return -TARGET_EINVAL;
10920             }
10921             ret = get_errno(sys_sched_getparam(arg1, &schp));
10922             if (!is_error(ret)) {
10923                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10924                     return -TARGET_EFAULT;
10925                 }
10926                 target_schp->sched_priority = tswap32(schp.sched_priority);
10927                 unlock_user_struct(target_schp, arg2, 1);
10928             }
10929         }
10930         return ret;
10931     case TARGET_NR_sched_setscheduler:
10932         {
10933             struct target_sched_param *target_schp;
10934             struct sched_param schp;
10935             if (arg3 == 0) {
10936                 return -TARGET_EINVAL;
10937             }
10938             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10939                 return -TARGET_EFAULT;
10940             }
10941             schp.sched_priority = tswap32(target_schp->sched_priority);
10942             unlock_user_struct(target_schp, arg3, 0);
10943             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10944         }
10945     case TARGET_NR_sched_getscheduler:
10946         return get_errno(sys_sched_getscheduler(arg1));
10947     case TARGET_NR_sched_getattr:
10948         {
10949             struct target_sched_attr *target_scha;
10950             struct sched_attr scha;
10951             if (arg2 == 0) {
10952                 return -TARGET_EINVAL;
10953             }
10954             if (arg3 > sizeof(scha)) {
10955                 arg3 = sizeof(scha);
10956             }
10957             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10958             if (!is_error(ret)) {
10959                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10960                 if (!target_scha) {
10961                     return -TARGET_EFAULT;
10962                 }
10963                 target_scha->size = tswap32(scha.size);
10964                 target_scha->sched_policy = tswap32(scha.sched_policy);
10965                 target_scha->sched_flags = tswap64(scha.sched_flags);
10966                 target_scha->sched_nice = tswap32(scha.sched_nice);
10967                 target_scha->sched_priority = tswap32(scha.sched_priority);
10968                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10969                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10970                 target_scha->sched_period = tswap64(scha.sched_period);
10971                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10972                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10973                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10974                 }
10975                 unlock_user(target_scha, arg2, arg3);
10976             }
10977             return ret;
10978         }
10979     case TARGET_NR_sched_setattr:
10980         {
10981             struct target_sched_attr *target_scha;
10982             struct sched_attr scha;
10983             uint32_t size;
10984             int zeroed;
10985             if (arg2 == 0) {
10986                 return -TARGET_EINVAL;
10987             }
10988             if (get_user_u32(size, arg2)) {
10989                 return -TARGET_EFAULT;
10990             }
10991             if (!size) {
10992                 size = offsetof(struct target_sched_attr, sched_util_min);
10993             }
10994             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10995                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10996                     return -TARGET_EFAULT;
10997                 }
10998                 return -TARGET_E2BIG;
10999             }
11000 
11001             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11002             if (zeroed < 0) {
11003                 return zeroed;
11004             } else if (zeroed == 0) {
11005                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11006                     return -TARGET_EFAULT;
11007                 }
11008                 return -TARGET_E2BIG;
11009             }
11010             if (size > sizeof(struct target_sched_attr)) {
11011                 size = sizeof(struct target_sched_attr);
11012             }
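                  /*
                   * At this point "size" is the number of bytes we will read
                   * from the guest: zero selects the original layout without
                   * the sched_util_* fields, anything smaller was rejected
                   * above, and any guest bytes beyond our known layout were
                   * required to be zero (check_zeroed_user) or we have
                   * already returned E2BIG, which mirrors the kernel's own
                   * handling of oversized sched_attr structures.
                   */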
11013 
11014             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11015             if (!target_scha) {
11016                 return -TARGET_EFAULT;
11017             }
11018             scha.size = size;
11019             scha.sched_policy = tswap32(target_scha->sched_policy);
11020             scha.sched_flags = tswap64(target_scha->sched_flags);
11021             scha.sched_nice = tswap32(target_scha->sched_nice);
11022             scha.sched_priority = tswap32(target_scha->sched_priority);
11023             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11024             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11025             scha.sched_period = tswap64(target_scha->sched_period);
11026             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11027                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11028                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11029             }
11030             unlock_user(target_scha, arg2, 0);
11031             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11032         }
11033     case TARGET_NR_sched_yield:
11034         return get_errno(sched_yield());
11035     case TARGET_NR_sched_get_priority_max:
11036         return get_errno(sched_get_priority_max(arg1));
11037     case TARGET_NR_sched_get_priority_min:
11038         return get_errno(sched_get_priority_min(arg1));
11039 #ifdef TARGET_NR_sched_rr_get_interval
11040     case TARGET_NR_sched_rr_get_interval:
11041         {
11042             struct timespec ts;
11043             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11044             if (!is_error(ret)) {
11045                 ret = host_to_target_timespec(arg2, &ts);
11046             }
11047         }
11048         return ret;
11049 #endif
11050 #ifdef TARGET_NR_sched_rr_get_interval_time64
11051     case TARGET_NR_sched_rr_get_interval_time64:
11052         {
11053             struct timespec ts;
11054             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11055             if (!is_error(ret)) {
11056                 ret = host_to_target_timespec64(arg2, &ts);
11057             }
11058         }
11059         return ret;
11060 #endif
11061 #if defined(TARGET_NR_nanosleep)
11062     case TARGET_NR_nanosleep:
11063         {
11064             struct timespec req, rem;
11065             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11066             ret = get_errno(safe_nanosleep(&req, &rem));
11067             if (is_error(ret) && arg2) {
11068                 host_to_target_timespec(arg2, &rem);
11069             }
11070         }
11071         return ret;
11072 #endif
11073     case TARGET_NR_prctl:
11074         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11076 #ifdef TARGET_NR_arch_prctl
11077     case TARGET_NR_arch_prctl:
11078         return do_arch_prctl(cpu_env, arg1, arg2);
11079 #endif
11080 #ifdef TARGET_NR_pread64
11081     case TARGET_NR_pread64:
11082         if (regpairs_aligned(cpu_env, num)) {
11083             arg4 = arg5;
11084             arg5 = arg6;
11085         }
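              /*
               * On 32-bit ABIs that want 64-bit syscall arguments in aligned
               * register pairs the guest inserts a padding slot, so the
               * offset halves arrive one argument later (arg5/arg6); shift
               * them back so target_offset64() below sees arg4/arg5.
               */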
11086         if (arg2 == 0 && arg3 == 0) {
11087             /* Special-case NULL buffer and zero length, which should succeed */
11088             p = 0;
11089         } else {
11090             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11091             if (!p) {
11092                 return -TARGET_EFAULT;
11093             }
11094         }
11095         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11096         unlock_user(p, arg2, ret);
11097         return ret;
11098     case TARGET_NR_pwrite64:
11099         if (regpairs_aligned(cpu_env, num)) {
11100             arg4 = arg5;
11101             arg5 = arg6;
11102         }
11103         if (arg2 == 0 && arg3 == 0) {
11104             /* Special-case NULL buffer and zero length, which should succeed */
11105             p = 0;
11106         } else {
11107             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11108             if (!p) {
11109                 return -TARGET_EFAULT;
11110             }
11111         }
11112         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11113         unlock_user(p, arg2, 0);
11114         return ret;
11115 #endif
11116     case TARGET_NR_getcwd:
11117         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11118             return -TARGET_EFAULT;
11119         ret = get_errno(sys_getcwd1(p, arg2));
11120         unlock_user(p, arg1, ret);
11121         return ret;
11122     case TARGET_NR_capget:
11123     case TARGET_NR_capset:
11124     {
11125         struct target_user_cap_header *target_header;
11126         struct target_user_cap_data *target_data = NULL;
11127         struct __user_cap_header_struct header;
11128         struct __user_cap_data_struct data[2];
11129         struct __user_cap_data_struct *dataptr = NULL;
11130         int i, target_datalen;
11131         int data_items = 1;
11132 
11133         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11134             return -TARGET_EFAULT;
11135         }
11136         header.version = tswap32(target_header->version);
11137         header.pid = tswap32(target_header->pid);
11138 
11139         if (header.version != _LINUX_CAPABILITY_VERSION) {
11140             /* Version 2 and up takes pointer to two user_data structs */
11141             data_items = 2;
11142         }
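              /*
               * _LINUX_CAPABILITY_VERSION is the original v1 format with a
               * single 32-bit data struct; the later v2/v3 formats widen the
               * capability sets to 64 bits split across two 32-bit structs,
               * hence data_items = 2.
               */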
11143 
11144         target_datalen = sizeof(*target_data) * data_items;
11145 
11146         if (arg2) {
11147             if (num == TARGET_NR_capget) {
11148                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11149             } else {
11150                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11151             }
11152             if (!target_data) {
11153                 unlock_user_struct(target_header, arg1, 0);
11154                 return -TARGET_EFAULT;
11155             }
11156 
11157             if (num == TARGET_NR_capset) {
11158                 for (i = 0; i < data_items; i++) {
11159                     data[i].effective = tswap32(target_data[i].effective);
11160                     data[i].permitted = tswap32(target_data[i].permitted);
11161                     data[i].inheritable = tswap32(target_data[i].inheritable);
11162                 }
11163             }
11164 
11165             dataptr = data;
11166         }
11167 
11168         if (num == TARGET_NR_capget) {
11169             ret = get_errno(capget(&header, dataptr));
11170         } else {
11171             ret = get_errno(capset(&header, dataptr));
11172         }
11173 
11174         /* The kernel always updates version for both capget and capset */
11175         target_header->version = tswap32(header.version);
11176         unlock_user_struct(target_header, arg1, 1);
11177 
11178         if (arg2) {
11179             if (num == TARGET_NR_capget) {
11180                 for (i = 0; i < data_items; i++) {
11181                     target_data[i].effective = tswap32(data[i].effective);
11182                     target_data[i].permitted = tswap32(data[i].permitted);
11183                     target_data[i].inheritable = tswap32(data[i].inheritable);
11184                 }
11185                 unlock_user(target_data, arg2, target_datalen);
11186             } else {
11187                 unlock_user(target_data, arg2, 0);
11188             }
11189         }
11190         return ret;
11191     }
11192     case TARGET_NR_sigaltstack:
11193         return do_sigaltstack(arg1, arg2, cpu_env);
11194 
11195 #ifdef CONFIG_SENDFILE
11196 #ifdef TARGET_NR_sendfile
11197     case TARGET_NR_sendfile:
11198     {
11199         off_t *offp = NULL;
11200         off_t off;
11201         if (arg3) {
11202             ret = get_user_sal(off, arg3);
11203             if (is_error(ret)) {
11204                 return ret;
11205             }
11206             offp = &off;
11207         }
11208         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11209         if (!is_error(ret) && arg3) {
11210             abi_long ret2 = put_user_sal(off, arg3);
11211             if (is_error(ret2)) {
11212                 ret = ret2;
11213             }
11214         }
11215         return ret;
11216     }
11217 #endif
11218 #ifdef TARGET_NR_sendfile64
11219     case TARGET_NR_sendfile64:
11220     {
11221         off_t *offp = NULL;
11222         off_t off;
11223         if (arg3) {
11224             ret = get_user_s64(off, arg3);
11225             if (is_error(ret)) {
11226                 return ret;
11227             }
11228             offp = &off;
11229         }
11230         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11231         if (!is_error(ret) && arg3) {
11232             abi_long ret2 = put_user_s64(off, arg3);
11233             if (is_error(ret2)) {
11234                 ret = ret2;
11235             }
11236         }
11237         return ret;
11238     }
11239 #endif
11240 #endif
11241 #ifdef TARGET_NR_vfork
11242     case TARGET_NR_vfork:
11243         return get_errno(do_fork(cpu_env,
11244                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11245                          0, 0, 0, 0));
11246 #endif
11247 #ifdef TARGET_NR_ugetrlimit
11248     case TARGET_NR_ugetrlimit:
11249     {
11250         struct rlimit rlim;
11251         int resource = target_to_host_resource(arg1);
11252         ret = get_errno(getrlimit(resource, &rlim));
11253         if (!is_error(ret)) {
11254             struct target_rlimit *target_rlim;
11255             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11256                 return -TARGET_EFAULT;
11257             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11258             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11259             unlock_user_struct(target_rlim, arg2, 1);
11260         }
11261         return ret;
11262     }
11263 #endif
11264 #ifdef TARGET_NR_truncate64
11265     case TARGET_NR_truncate64:
11266         if (!(p = lock_user_string(arg1)))
11267             return -TARGET_EFAULT;
11268         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11269         unlock_user(p, arg1, 0);
11270         return ret;
11271 #endif
11272 #ifdef TARGET_NR_ftruncate64
11273     case TARGET_NR_ftruncate64:
11274         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11275 #endif
11276 #ifdef TARGET_NR_stat64
11277     case TARGET_NR_stat64:
11278         if (!(p = lock_user_string(arg1))) {
11279             return -TARGET_EFAULT;
11280         }
11281         ret = get_errno(stat(path(p), &st));
11282         unlock_user(p, arg1, 0);
11283         if (!is_error(ret))
11284             ret = host_to_target_stat64(cpu_env, arg2, &st);
11285         return ret;
11286 #endif
11287 #ifdef TARGET_NR_lstat64
11288     case TARGET_NR_lstat64:
11289         if (!(p = lock_user_string(arg1))) {
11290             return -TARGET_EFAULT;
11291         }
11292         ret = get_errno(lstat(path(p), &st));
11293         unlock_user(p, arg1, 0);
11294         if (!is_error(ret))
11295             ret = host_to_target_stat64(cpu_env, arg2, &st);
11296         return ret;
11297 #endif
11298 #ifdef TARGET_NR_fstat64
11299     case TARGET_NR_fstat64:
11300         ret = get_errno(fstat(arg1, &st));
11301         if (!is_error(ret))
11302             ret = host_to_target_stat64(cpu_env, arg2, &st);
11303         return ret;
11304 #endif
11305 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11306 #ifdef TARGET_NR_fstatat64
11307     case TARGET_NR_fstatat64:
11308 #endif
11309 #ifdef TARGET_NR_newfstatat
11310     case TARGET_NR_newfstatat:
11311 #endif
11312         if (!(p = lock_user_string(arg2))) {
11313             return -TARGET_EFAULT;
11314         }
11315         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11316         unlock_user(p, arg2, 0);
11317         if (!is_error(ret))
11318             ret = host_to_target_stat64(cpu_env, arg3, &st);
11319         return ret;
11320 #endif
11321 #if defined(TARGET_NR_statx)
11322     case TARGET_NR_statx:
11323         {
11324             struct target_statx *target_stx;
11325             int dirfd = arg1;
11326             int flags = arg3;
11327 
11328             p = lock_user_string(arg2);
11329             if (p == NULL) {
11330                 return -TARGET_EFAULT;
11331             }
11332 #if defined(__NR_statx)
11333             {
11334                 /*
11335                  * It is assumed that struct statx is architecture independent.
11336                  */
11337                 struct target_statx host_stx;
11338                 int mask = arg4;
11339 
11340                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11341                 if (!is_error(ret)) {
11342                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11343                         unlock_user(p, arg2, 0);
11344                         return -TARGET_EFAULT;
11345                     }
11346                 }
11347 
11348                 if (ret != -TARGET_ENOSYS) {
11349                     unlock_user(p, arg2, 0);
11350                     return ret;
11351                 }
11352             }
11353 #endif
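                  /*
                   * Either the host has no statx() or it returned ENOSYS:
                   * fall back to fstatat() and synthesize the subset of statx
                   * fields that a struct stat can provide (stx_mask, btime
                   * and the nanosecond timestamp fields are left as zero).
                   */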
11354             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11355             unlock_user(p, arg2, 0);
11356 
11357             if (!is_error(ret)) {
11358                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11359                     return -TARGET_EFAULT;
11360                 }
11361                 memset(target_stx, 0, sizeof(*target_stx));
11362                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11363                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11364                 __put_user(st.st_ino, &target_stx->stx_ino);
11365                 __put_user(st.st_mode, &target_stx->stx_mode);
11366                 __put_user(st.st_uid, &target_stx->stx_uid);
11367                 __put_user(st.st_gid, &target_stx->stx_gid);
11368                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11369                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11370                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11371                 __put_user(st.st_size, &target_stx->stx_size);
11372                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11373                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11374                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11375                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11376                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11377                 unlock_user_struct(target_stx, arg5, 1);
11378             }
11379         }
11380         return ret;
11381 #endif
11382 #ifdef TARGET_NR_lchown
11383     case TARGET_NR_lchown:
11384         if (!(p = lock_user_string(arg1)))
11385             return -TARGET_EFAULT;
11386         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11387         unlock_user(p, arg1, 0);
11388         return ret;
11389 #endif
11390 #ifdef TARGET_NR_getuid
11391     case TARGET_NR_getuid:
11392         return get_errno(high2lowuid(getuid()));
11393 #endif
11394 #ifdef TARGET_NR_getgid
11395     case TARGET_NR_getgid:
11396         return get_errno(high2lowgid(getgid()));
11397 #endif
11398 #ifdef TARGET_NR_geteuid
11399     case TARGET_NR_geteuid:
11400         return get_errno(high2lowuid(geteuid()));
11401 #endif
11402 #ifdef TARGET_NR_getegid
11403     case TARGET_NR_getegid:
11404         return get_errno(high2lowgid(getegid()));
11405 #endif
11406     case TARGET_NR_setreuid:
11407         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11408     case TARGET_NR_setregid:
11409         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11410     case TARGET_NR_getgroups:
11411         {
11412             int gidsetsize = arg1;
11413             target_id *target_grouplist;
11414             gid_t *grouplist;
11415             int i;
11416 
11417             grouplist = alloca(gidsetsize * sizeof(gid_t));
11418             ret = get_errno(getgroups(gidsetsize, grouplist));
11419             if (gidsetsize == 0)
11420                 return ret;
11421             if (!is_error(ret)) {
11422                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11423                 if (!target_grouplist)
11424                     return -TARGET_EFAULT;
11425                 for (i = 0; i < ret; i++)
11426                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11427                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11428             }
11429         }
11430         return ret;
11431     case TARGET_NR_setgroups:
11432         {
11433             int gidsetsize = arg1;
11434             target_id *target_grouplist;
11435             gid_t *grouplist = NULL;
11436             int i;
11437             if (gidsetsize) {
11438                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11439                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11440                 if (!target_grouplist) {
11441                     return -TARGET_EFAULT;
11442                 }
11443                 for (i = 0; i < gidsetsize; i++) {
11444                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11445                 }
11446                 unlock_user(target_grouplist, arg2, 0);
11447             }
11448             return get_errno(setgroups(gidsetsize, grouplist));
11449         }
11450     case TARGET_NR_fchown:
11451         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11452 #if defined(TARGET_NR_fchownat)
11453     case TARGET_NR_fchownat:
11454         if (!(p = lock_user_string(arg2)))
11455             return -TARGET_EFAULT;
11456         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11457                                  low2highgid(arg4), arg5));
11458         unlock_user(p, arg2, 0);
11459         return ret;
11460 #endif
11461 #ifdef TARGET_NR_setresuid
11462     case TARGET_NR_setresuid:
11463         return get_errno(sys_setresuid(low2highuid(arg1),
11464                                        low2highuid(arg2),
11465                                        low2highuid(arg3)));
11466 #endif
11467 #ifdef TARGET_NR_getresuid
11468     case TARGET_NR_getresuid:
11469         {
11470             uid_t ruid, euid, suid;
11471             ret = get_errno(getresuid(&ruid, &euid, &suid));
11472             if (!is_error(ret)) {
11473                 if (put_user_id(high2lowuid(ruid), arg1)
11474                     || put_user_id(high2lowuid(euid), arg2)
11475                     || put_user_id(high2lowuid(suid), arg3))
11476                     return -TARGET_EFAULT;
11477             }
11478         }
11479         return ret;
11480 #endif
11481 #ifdef TARGET_NR_getresgid
11482     case TARGET_NR_setresgid:
11483         return get_errno(sys_setresgid(low2highgid(arg1),
11484                                        low2highgid(arg2),
11485                                        low2highgid(arg3)));
11486 #endif
11487 #ifdef TARGET_NR_getresgid
11488     case TARGET_NR_getresgid:
11489         {
11490             gid_t rgid, egid, sgid;
11491             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11492             if (!is_error(ret)) {
11493                 if (put_user_id(high2lowgid(rgid), arg1)
11494                     || put_user_id(high2lowgid(egid), arg2)
11495                     || put_user_id(high2lowgid(sgid), arg3))
11496                     return -TARGET_EFAULT;
11497             }
11498         }
11499         return ret;
11500 #endif
11501 #ifdef TARGET_NR_chown
11502     case TARGET_NR_chown:
11503         if (!(p = lock_user_string(arg1)))
11504             return -TARGET_EFAULT;
11505         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11506         unlock_user(p, arg1, 0);
11507         return ret;
11508 #endif
11509     case TARGET_NR_setuid:
11510         return get_errno(sys_setuid(low2highuid(arg1)));
11511     case TARGET_NR_setgid:
11512         return get_errno(sys_setgid(low2highgid(arg1)));
11513     case TARGET_NR_setfsuid:
11514         return get_errno(setfsuid(arg1));
11515     case TARGET_NR_setfsgid:
11516         return get_errno(setfsgid(arg1));
11517 
11518 #ifdef TARGET_NR_lchown32
11519     case TARGET_NR_lchown32:
11520         if (!(p = lock_user_string(arg1)))
11521             return -TARGET_EFAULT;
11522         ret = get_errno(lchown(p, arg2, arg3));
11523         unlock_user(p, arg1, 0);
11524         return ret;
11525 #endif
11526 #ifdef TARGET_NR_getuid32
11527     case TARGET_NR_getuid32:
11528         return get_errno(getuid());
11529 #endif
11530 
11531 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11532     /* Alpha specific */
11533     case TARGET_NR_getxuid:
11534         {
11535             uid_t euid;
11536             euid = geteuid();
11537             cpu_env->ir[IR_A4] = euid;
11538         }
11539         return get_errno(getuid());
11540 #endif
11541 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11542     /* Alpha specific */
11543     case TARGET_NR_getxgid:
11544         {
11545             gid_t egid;
11546             egid = getegid();
11547             cpu_env->ir[IR_A4] = egid;
11548         }
11549         return get_errno(getgid());
11550 #endif
11551 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11552     /* Alpha specific */
11553     case TARGET_NR_osf_getsysinfo:
11554         ret = -TARGET_EOPNOTSUPP;
11555         switch (arg1) {
11556           case TARGET_GSI_IEEE_FP_CONTROL:
11557             {
11558                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11559                 uint64_t swcr = cpu_env->swcr;
11560 
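                      /*
                       * The value reported to the guest is the saved
                       * trap-enable/map bits from swcr with the live
                       * exception status bits pulled out of the hardware
                       * fpcr, since the status is tracked only in the fpcr
                       * (see the osf_setsysinfo comment below).
                       */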
11561                 swcr &= ~SWCR_STATUS_MASK;
11562                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11563 
11564                 if (put_user_u64(swcr, arg2))
11565                     return -TARGET_EFAULT;
11566                 ret = 0;
11567             }
11568             break;
11569 
11570           /* case GSI_IEEE_STATE_AT_SIGNAL:
11571              -- Not implemented in linux kernel.
11572              case GSI_UACPROC:
11573              -- Retrieves current unaligned access state; not much used.
11574              case GSI_PROC_TYPE:
11575              -- Retrieves implver information; surely not used.
11576              case GSI_GET_HWRPB:
11577              -- Grabs a copy of the HWRPB; surely not used.
11578           */
11579         }
11580         return ret;
11581 #endif
11582 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11583     /* Alpha specific */
11584     case TARGET_NR_osf_setsysinfo:
11585         ret = -TARGET_EOPNOTSUPP;
11586         switch (arg1) {
11587           case TARGET_SSI_IEEE_FP_CONTROL:
11588             {
11589                 uint64_t swcr, fpcr;
11590 
11591                 if (get_user_u64(swcr, arg2)) {
11592                     return -TARGET_EFAULT;
11593                 }
11594 
11595                 /*
11596                  * The kernel calls swcr_update_status to update the
11597                  * status bits from the fpcr at every point that it
11598                  * could be queried.  Therefore, we store the status
11599                  * bits only in FPCR.
11600                  */
11601                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11602 
11603                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11604                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11605                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11606                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11607                 ret = 0;
11608             }
11609             break;
11610 
11611           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11612             {
11613                 uint64_t exc, fpcr, fex;
11614 
11615                 if (get_user_u64(exc, arg2)) {
11616                     return -TARGET_EFAULT;
11617                 }
11618                 exc &= SWCR_STATUS_MASK;
11619                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11620 
11621                 /* Old exceptions are not signaled.  */
11622                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11623                 fex = exc & ~fex;
11624                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11625                 fex &= (cpu_env)->swcr;
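                      /*
                       * fex is now the set of newly-raised exceptions that
                       * the guest has trap-enabled; if it is non-zero a
                       * SIGFPE with a matching si_code is queued below.
                       */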
11626 
11627                 /* Update the hardware fpcr.  */
11628                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11629                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11630 
11631                 if (fex) {
11632                     int si_code = TARGET_FPE_FLTUNK;
11633                     target_siginfo_t info;
11634 
11635                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11636                         si_code = TARGET_FPE_FLTUND;
11637                     }
11638                     if (fex & SWCR_TRAP_ENABLE_INE) {
11639                         si_code = TARGET_FPE_FLTRES;
11640                     }
11641                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11642                         si_code = TARGET_FPE_FLTUND;
11643                     }
11644                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11645                         si_code = TARGET_FPE_FLTOVF;
11646                     }
11647                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11648                         si_code = TARGET_FPE_FLTDIV;
11649                     }
11650                     if (fex & SWCR_TRAP_ENABLE_INV) {
11651                         si_code = TARGET_FPE_FLTINV;
11652                     }
11653 
11654                     info.si_signo = SIGFPE;
11655                     info.si_errno = 0;
11656                     info.si_code = si_code;
11657                     info._sifields._sigfault._addr = (cpu_env)->pc;
11658                     queue_signal(cpu_env, info.si_signo,
11659                                  QEMU_SI_FAULT, &info);
11660                 }
11661                 ret = 0;
11662             }
11663             break;
11664 
11665           /* case SSI_NVPAIRS:
11666              -- Used with SSIN_UACPROC to enable unaligned accesses.
11667              case SSI_IEEE_STATE_AT_SIGNAL:
11668              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11669              -- Not implemented in linux kernel
11670           */
11671         }
11672         return ret;
11673 #endif
11674 #ifdef TARGET_NR_osf_sigprocmask
11675     /* Alpha specific.  */
11676     case TARGET_NR_osf_sigprocmask:
11677         {
11678             abi_ulong mask;
11679             int how;
11680             sigset_t set, oldset;
11681 
11682             switch (arg1) {
11683             case TARGET_SIG_BLOCK:
11684                 how = SIG_BLOCK;
11685                 break;
11686             case TARGET_SIG_UNBLOCK:
11687                 how = SIG_UNBLOCK;
11688                 break;
11689             case TARGET_SIG_SETMASK:
11690                 how = SIG_SETMASK;
11691                 break;
11692             default:
11693                 return -TARGET_EINVAL;
11694             }
11695             mask = arg2;
11696             target_to_host_old_sigset(&set, &mask);
11697             ret = do_sigprocmask(how, &set, &oldset);
11698             if (!ret) {
11699                 host_to_target_old_sigset(&mask, &oldset);
11700                 ret = mask;
11701             }
11702         }
11703         return ret;
11704 #endif
11705 
11706 #ifdef TARGET_NR_getgid32
11707     case TARGET_NR_getgid32:
11708         return get_errno(getgid());
11709 #endif
11710 #ifdef TARGET_NR_geteuid32
11711     case TARGET_NR_geteuid32:
11712         return get_errno(geteuid());
11713 #endif
11714 #ifdef TARGET_NR_getegid32
11715     case TARGET_NR_getegid32:
11716         return get_errno(getegid());
11717 #endif
11718 #ifdef TARGET_NR_setreuid32
11719     case TARGET_NR_setreuid32:
11720         return get_errno(setreuid(arg1, arg2));
11721 #endif
11722 #ifdef TARGET_NR_setregid32
11723     case TARGET_NR_setregid32:
11724         return get_errno(setregid(arg1, arg2));
11725 #endif
11726 #ifdef TARGET_NR_getgroups32
11727     case TARGET_NR_getgroups32:
11728         {
11729             int gidsetsize = arg1;
11730             uint32_t *target_grouplist;
11731             gid_t *grouplist;
11732             int i;
11733 
11734             grouplist = alloca(gidsetsize * sizeof(gid_t));
11735             ret = get_errno(getgroups(gidsetsize, grouplist));
11736             if (gidsetsize == 0)
11737                 return ret;
11738             if (!is_error(ret)) {
11739                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11740                 if (!target_grouplist) {
11741                     return -TARGET_EFAULT;
11742                 }
11743                 for (i = 0; i < ret; i++)
11744                     target_grouplist[i] = tswap32(grouplist[i]);
11745                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11746             }
11747         }
11748         return ret;
11749 #endif
11750 #ifdef TARGET_NR_setgroups32
11751     case TARGET_NR_setgroups32:
11752         {
11753             int gidsetsize = arg1;
11754             uint32_t *target_grouplist;
11755             gid_t *grouplist;
11756             int i;
11757 
11758             grouplist = alloca(gidsetsize * sizeof(gid_t));
11759             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11760             if (!target_grouplist) {
11761                 return -TARGET_EFAULT;
11762             }
11763             for (i = 0; i < gidsetsize; i++)
11764                 grouplist[i] = tswap32(target_grouplist[i]);
11765             unlock_user(target_grouplist, arg2, 0);
11766             return get_errno(setgroups(gidsetsize, grouplist));
11767         }
11768 #endif
11769 #ifdef TARGET_NR_fchown32
11770     case TARGET_NR_fchown32:
11771         return get_errno(fchown(arg1, arg2, arg3));
11772 #endif
11773 #ifdef TARGET_NR_setresuid32
11774     case TARGET_NR_setresuid32:
11775         return get_errno(sys_setresuid(arg1, arg2, arg3));
11776 #endif
11777 #ifdef TARGET_NR_getresuid32
11778     case TARGET_NR_getresuid32:
11779         {
11780             uid_t ruid, euid, suid;
11781             ret = get_errno(getresuid(&ruid, &euid, &suid));
11782             if (!is_error(ret)) {
11783                 if (put_user_u32(ruid, arg1)
11784                     || put_user_u32(euid, arg2)
11785                     || put_user_u32(suid, arg3))
11786                     return -TARGET_EFAULT;
11787             }
11788         }
11789         return ret;
11790 #endif
11791 #ifdef TARGET_NR_setresgid32
11792     case TARGET_NR_setresgid32:
11793         return get_errno(sys_setresgid(arg1, arg2, arg3));
11794 #endif
11795 #ifdef TARGET_NR_getresgid32
11796     case TARGET_NR_getresgid32:
11797         {
11798             gid_t rgid, egid, sgid;
11799             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11800             if (!is_error(ret)) {
11801                 if (put_user_u32(rgid, arg1)
11802                     || put_user_u32(egid, arg2)
11803                     || put_user_u32(sgid, arg3))
11804                     return -TARGET_EFAULT;
11805             }
11806         }
11807         return ret;
11808 #endif
11809 #ifdef TARGET_NR_chown32
11810     case TARGET_NR_chown32:
11811         if (!(p = lock_user_string(arg1)))
11812             return -TARGET_EFAULT;
11813         ret = get_errno(chown(p, arg2, arg3));
11814         unlock_user(p, arg1, 0);
11815         return ret;
11816 #endif
11817 #ifdef TARGET_NR_setuid32
11818     case TARGET_NR_setuid32:
11819         return get_errno(sys_setuid(arg1));
11820 #endif
11821 #ifdef TARGET_NR_setgid32
11822     case TARGET_NR_setgid32:
11823         return get_errno(sys_setgid(arg1));
11824 #endif
11825 #ifdef TARGET_NR_setfsuid32
11826     case TARGET_NR_setfsuid32:
11827         return get_errno(setfsuid(arg1));
11828 #endif
11829 #ifdef TARGET_NR_setfsgid32
11830     case TARGET_NR_setfsgid32:
11831         return get_errno(setfsgid(arg1));
11832 #endif
11833 #ifdef TARGET_NR_mincore
11834     case TARGET_NR_mincore:
11835         {
11836             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11837             if (!a) {
11838                 return -TARGET_ENOMEM;
11839             }
11840             p = lock_user_string(arg3);
11841             if (!p) {
11842                 ret = -TARGET_EFAULT;
11843             } else {
11844                 ret = get_errno(mincore(a, arg2, p));
11845                 unlock_user(p, arg3, ret);
11846             }
11847             unlock_user(a, arg1, 0);
11848         }
11849         return ret;
11850 #endif
11851 #ifdef TARGET_NR_arm_fadvise64_64
11852     case TARGET_NR_arm_fadvise64_64:
11853         /* arm_fadvise64_64 looks like fadvise64_64 but
11854          * with different argument order: fd, advice, offset, len
11855          * rather than the usual fd, offset, len, advice.
11856          * Note that offset and len are both 64-bit so appear as
11857          * pairs of 32-bit registers.
11858          */
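              /*
               * Illustratively, the guest passes (fd, advice, offset_pair,
               * len_pair); target_offset64() folds each pair of 32-bit
               * halves back into a 64-bit value and posix_fadvise() takes
               * the conventional (fd, offset, len, advice) order.
               */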
11859         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11860                             target_offset64(arg5, arg6), arg2);
11861         return -host_to_target_errno(ret);
11862 #endif
11863 
11864 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11865 
11866 #ifdef TARGET_NR_fadvise64_64
11867     case TARGET_NR_fadvise64_64:
11868 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11869         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11870         ret = arg2;
11871         arg2 = arg3;
11872         arg3 = arg4;
11873         arg4 = arg5;
11874         arg5 = arg6;
11875         arg6 = ret;
11876 #else
11877         /* 6 args: fd, offset (high, low), len (high, low), advice */
11878         if (regpairs_aligned(cpu_env, num)) {
11879             /* offset is in (3,4), len in (5,6) and advice in 7 */
11880             arg2 = arg3;
11881             arg3 = arg4;
11882             arg4 = arg5;
11883             arg5 = arg6;
11884             arg6 = arg7;
11885         }
11886 #endif
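              /*
               * Whichever path was taken above, the arguments are now
               * normalized to: fd in arg1, the offset pair in arg2/arg3,
               * the length pair in arg4/arg5 and the advice in arg6.
               */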
11887         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11888                             target_offset64(arg4, arg5), arg6);
11889         return -host_to_target_errno(ret);
11890 #endif
11891 
11892 #ifdef TARGET_NR_fadvise64
11893     case TARGET_NR_fadvise64:
11894         /* 5 args: fd, offset (high, low), len, advice */
11895         if (regpairs_aligned(cpu_env, num)) {
11896             /* offset is in (3,4), len in 5 and advice in 6 */
11897             arg2 = arg3;
11898             arg3 = arg4;
11899             arg4 = arg5;
11900             arg5 = arg6;
11901         }
11902         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11903         return -host_to_target_errno(ret);
11904 #endif
11905 
11906 #else /* not a 32-bit ABI */
11907 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11908 #ifdef TARGET_NR_fadvise64_64
11909     case TARGET_NR_fadvise64_64:
11910 #endif
11911 #ifdef TARGET_NR_fadvise64
11912     case TARGET_NR_fadvise64:
11913 #endif
11914 #ifdef TARGET_S390X
11915         switch (arg4) {
11916         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11917         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11918         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11919         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11920         default: break;
11921         }
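              /*
               * The s390x kernel ABI numbers POSIX_FADV_DONTNEED/NOREUSE as
               * 6/7 rather than the generic 4/5, so translate those and turn
               * the generic values into deliberately invalid ones.
               */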
11922 #endif
11923         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11924 #endif
11925 #endif /* end of 64-bit ABI fadvise handling */
11926 
11927 #ifdef TARGET_NR_madvise
11928     case TARGET_NR_madvise:
11929         return target_madvise(arg1, arg2, arg3);
11930 #endif
11931 #ifdef TARGET_NR_fcntl64
11932     case TARGET_NR_fcntl64:
11933     {
11934         int cmd;
11935         struct flock64 fl;
11936         from_flock64_fn *copyfrom = copy_from_user_flock64;
11937         to_flock64_fn *copyto = copy_to_user_flock64;
11938 
11939 #ifdef TARGET_ARM
11940         if (!cpu_env->eabi) {
11941             copyfrom = copy_from_user_oabi_flock64;
11942             copyto = copy_to_user_oabi_flock64;
11943         }
11944 #endif
11945 
11946         cmd = target_to_host_fcntl_cmd(arg2);
11947         if (cmd == -TARGET_EINVAL) {
11948             return cmd;
11949         }
11950 
11951         switch (arg2) {
11952         case TARGET_F_GETLK64:
11953             ret = copyfrom(&fl, arg3);
11954             if (ret) {
11955                 break;
11956             }
11957             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11958             if (ret == 0) {
11959                 ret = copyto(arg3, &fl);
11960             }
11961             break;
11962 
11963         case TARGET_F_SETLK64:
11964         case TARGET_F_SETLKW64:
11965             ret = copyfrom(&fl, arg3);
11966             if (ret) {
11967                 break;
11968             }
11969             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11970             break;
11971         default:
11972             ret = do_fcntl(arg1, arg2, arg3);
11973             break;
11974         }
11975         return ret;
11976     }
11977 #endif
11978 #ifdef TARGET_NR_cacheflush
11979     case TARGET_NR_cacheflush:
11980         /* self-modifying code is handled automatically, so nothing needed */
11981         return 0;
11982 #endif
11983 #ifdef TARGET_NR_getpagesize
11984     case TARGET_NR_getpagesize:
11985         return TARGET_PAGE_SIZE;
11986 #endif
11987     case TARGET_NR_gettid:
11988         return get_errno(sys_gettid());
11989 #ifdef TARGET_NR_readahead
11990     case TARGET_NR_readahead:
11991 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11992         if (regpairs_aligned(cpu_env, num)) {
11993             arg2 = arg3;
11994             arg3 = arg4;
11995             arg4 = arg5;
11996         }
11997         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11998 #else
11999         ret = get_errno(readahead(arg1, arg2, arg3));
12000 #endif
12001         return ret;
12002 #endif
12003 #ifdef CONFIG_ATTR
12004 #ifdef TARGET_NR_setxattr
12005     case TARGET_NR_listxattr:
12006     case TARGET_NR_llistxattr:
12007     {
12008         void *p, *b = 0;
12009         if (arg2) {
12010             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12011             if (!b) {
12012                 return -TARGET_EFAULT;
12013             }
12014         }
12015         p = lock_user_string(arg1);
12016         if (p) {
12017             if (num == TARGET_NR_listxattr) {
12018                 ret = get_errno(listxattr(p, b, arg3));
12019             } else {
12020                 ret = get_errno(llistxattr(p, b, arg3));
12021             }
12022         } else {
12023             ret = -TARGET_EFAULT;
12024         }
12025         unlock_user(p, arg1, 0);
12026         unlock_user(b, arg2, arg3);
12027         return ret;
12028     }
12029     case TARGET_NR_flistxattr:
12030     {
12031         void *b = 0;
12032         if (arg2) {
12033             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12034             if (!b) {
12035                 return -TARGET_EFAULT;
12036             }
12037         }
12038         ret = get_errno(flistxattr(arg1, b, arg3));
12039         unlock_user(b, arg2, arg3);
12040         return ret;
12041     }
12042     case TARGET_NR_setxattr:
12043     case TARGET_NR_lsetxattr:
12044         {
12045             void *p, *n, *v = 0;
12046             if (arg3) {
12047                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12048                 if (!v) {
12049                     return -TARGET_EFAULT;
12050                 }
12051             }
12052             p = lock_user_string(arg1);
12053             n = lock_user_string(arg2);
12054             if (p && n) {
12055                 if (num == TARGET_NR_setxattr) {
12056                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12057                 } else {
12058                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12059                 }
12060             } else {
12061                 ret = -TARGET_EFAULT;
12062             }
12063             unlock_user(p, arg1, 0);
12064             unlock_user(n, arg2, 0);
12065             unlock_user(v, arg3, 0);
12066         }
12067         return ret;
12068     case TARGET_NR_fsetxattr:
12069         {
12070             void *n, *v = 0;
12071             if (arg3) {
12072                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12073                 if (!v) {
12074                     return -TARGET_EFAULT;
12075                 }
12076             }
12077             n = lock_user_string(arg2);
12078             if (n) {
12079                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12080             } else {
12081                 ret = -TARGET_EFAULT;
12082             }
12083             unlock_user(n, arg2, 0);
12084             unlock_user(v, arg3, 0);
12085         }
12086         return ret;
12087     case TARGET_NR_getxattr:
12088     case TARGET_NR_lgetxattr:
12089         {
12090             void *p, *n, *v = 0;
12091             if (arg3) {
12092                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12093                 if (!v) {
12094                     return -TARGET_EFAULT;
12095                 }
12096             }
12097             p = lock_user_string(arg1);
12098             n = lock_user_string(arg2);
12099             if (p && n) {
12100                 if (num == TARGET_NR_getxattr) {
12101                     ret = get_errno(getxattr(p, n, v, arg4));
12102                 } else {
12103                     ret = get_errno(lgetxattr(p, n, v, arg4));
12104                 }
12105             } else {
12106                 ret = -TARGET_EFAULT;
12107             }
12108             unlock_user(p, arg1, 0);
12109             unlock_user(n, arg2, 0);
12110             unlock_user(v, arg3, arg4);
12111         }
12112         return ret;
12113     case TARGET_NR_fgetxattr:
12114         {
12115             void *n, *v = 0;
12116             if (arg3) {
12117                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12118                 if (!v) {
12119                     return -TARGET_EFAULT;
12120                 }
12121             }
12122             n = lock_user_string(arg2);
12123             if (n) {
12124                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12125             } else {
12126                 ret = -TARGET_EFAULT;
12127             }
12128             unlock_user(n, arg2, 0);
12129             unlock_user(v, arg3, arg4);
12130         }
12131         return ret;
12132     case TARGET_NR_removexattr:
12133     case TARGET_NR_lremovexattr:
12134         {
12135             void *p, *n;
12136             p = lock_user_string(arg1);
12137             n = lock_user_string(arg2);
12138             if (p && n) {
12139                 if (num == TARGET_NR_removexattr) {
12140                     ret = get_errno(removexattr(p, n));
12141                 } else {
12142                     ret = get_errno(lremovexattr(p, n));
12143                 }
12144             } else {
12145                 ret = -TARGET_EFAULT;
12146             }
12147             unlock_user(p, arg1, 0);
12148             unlock_user(n, arg2, 0);
12149         }
12150         return ret;
12151     case TARGET_NR_fremovexattr:
12152         {
12153             void *n;
12154             n = lock_user_string(arg2);
12155             if (n) {
12156                 ret = get_errno(fremovexattr(arg1, n));
12157             } else {
12158                 ret = -TARGET_EFAULT;
12159             }
12160             unlock_user(n, arg2, 0);
12161         }
12162         return ret;
12163 #endif
12164 #endif /* CONFIG_ATTR */
12165 #ifdef TARGET_NR_set_thread_area
12166     case TARGET_NR_set_thread_area:
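      /*
       * Each target keeps the TLS pointer in an architecture-specific
       * location; targets without a special case here get ENOSYS.
       */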
12167 #if defined(TARGET_MIPS)
12168       cpu_env->active_tc.CP0_UserLocal = arg1;
12169       return 0;
12170 #elif defined(TARGET_CRIS)
12171       if (arg1 & 0xff) {
12172           ret = -TARGET_EINVAL;
12173       } else {
12174           cpu_env->pregs[PR_PID] = arg1;
12175           ret = 0;
12176       }
12177       return ret;
12178 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12179       return do_set_thread_area(cpu_env, arg1);
12180 #elif defined(TARGET_M68K)
12181       {
12182           TaskState *ts = cpu->opaque;
12183           ts->tp_value = arg1;
12184           return 0;
12185       }
12186 #else
12187       return -TARGET_ENOSYS;
12188 #endif
12189 #endif
12190 #ifdef TARGET_NR_get_thread_area
12191     case TARGET_NR_get_thread_area:
12192 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12193         return do_get_thread_area(cpu_env, arg1);
12194 #elif defined(TARGET_M68K)
12195         {
12196             TaskState *ts = cpu->opaque;
12197             return ts->tp_value;
12198         }
12199 #else
12200         return -TARGET_ENOSYS;
12201 #endif
12202 #endif
12203 #ifdef TARGET_NR_getdomainname
12204     case TARGET_NR_getdomainname:
12205         return -TARGET_ENOSYS;
12206 #endif
12207 
12208 #ifdef TARGET_NR_clock_settime
12209     case TARGET_NR_clock_settime:
12210     {
12211         struct timespec ts;
12212 
12213         ret = target_to_host_timespec(&ts, arg2);
12214         if (!is_error(ret)) {
12215             ret = get_errno(clock_settime(arg1, &ts));
12216         }
12217         return ret;
12218     }
12219 #endif
12220 #ifdef TARGET_NR_clock_settime64
12221     case TARGET_NR_clock_settime64:
12222     {
12223         struct timespec ts;
12224 
12225         ret = target_to_host_timespec64(&ts, arg2);
12226         if (!is_error(ret)) {
12227             ret = get_errno(clock_settime(arg1, &ts));
12228         }
12229         return ret;
12230     }
12231 #endif
12232 #ifdef TARGET_NR_clock_gettime
12233     case TARGET_NR_clock_gettime:
12234     {
12235         struct timespec ts;
12236         ret = get_errno(clock_gettime(arg1, &ts));
12237         if (!is_error(ret)) {
12238             ret = host_to_target_timespec(arg2, &ts);
12239         }
12240         return ret;
12241     }
12242 #endif
12243 #ifdef TARGET_NR_clock_gettime64
12244     case TARGET_NR_clock_gettime64:
12245     {
12246         struct timespec ts;
12247         ret = get_errno(clock_gettime(arg1, &ts));
12248         if (!is_error(ret)) {
12249             ret = host_to_target_timespec64(arg2, &ts);
12250         }
12251         return ret;
12252     }
12253 #endif
12254 #ifdef TARGET_NR_clock_getres
12255     case TARGET_NR_clock_getres:
12256     {
12257         struct timespec ts;
12258         ret = get_errno(clock_getres(arg1, &ts));
12259         if (!is_error(ret)) {
12260             host_to_target_timespec(arg2, &ts);
12261         }
12262         return ret;
12263     }
12264 #endif
12265 #ifdef TARGET_NR_clock_getres_time64
12266     case TARGET_NR_clock_getres_time64:
12267     {
12268         struct timespec ts;
12269         ret = get_errno(clock_getres(arg1, &ts));
12270         if (!is_error(ret)) {
12271             host_to_target_timespec64(arg2, &ts);
12272         }
12273         return ret;
12274     }
12275 #endif
12276 #ifdef TARGET_NR_clock_nanosleep
12277     case TARGET_NR_clock_nanosleep:
12278     {
12279         struct timespec ts;
12280         if (target_to_host_timespec(&ts, arg3)) {
12281             return -TARGET_EFAULT;
12282         }
12283         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12284                                              &ts, arg4 ? &ts : NULL));
12285         /*
12286          * If the call is interrupted by a signal handler, it fails with
12287          * -TARGET_EINTR; if arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12288          * the remaining unslept time is copied back to arg4.
12289          */
12290         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12291             host_to_target_timespec(arg4, &ts)) {
12292               return -TARGET_EFAULT;
12293         }
12294 
12295         return ret;
12296     }
12297 #endif
12298 #ifdef TARGET_NR_clock_nanosleep_time64
12299     case TARGET_NR_clock_nanosleep_time64:
12300     {
12301         struct timespec ts;
12302 
12303         if (target_to_host_timespec64(&ts, arg3)) {
12304             return -TARGET_EFAULT;
12305         }
12306 
12307         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12308                                              &ts, arg4 ? &ts : NULL));
12309 
12310         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12311             host_to_target_timespec64(arg4, &ts)) {
12312             return -TARGET_EFAULT;
12313         }
12314         return ret;
12315     }
12316 #endif
12317 
12318 #if defined(TARGET_NR_set_tid_address)
12319     case TARGET_NR_set_tid_address:
12320     {
12321         TaskState *ts = cpu->opaque;
12322         ts->child_tidptr = arg1;
12323         /* Don't call the host set_tid_address(); just return the caller's tid. */
12324         return get_errno(sys_gettid());
12325     }
12326 #endif
12327 
12328     case TARGET_NR_tkill:
12329         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12330 
12331     case TARGET_NR_tgkill:
12332         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12333                          target_to_host_signal(arg3)));
12334 
12335 #ifdef TARGET_NR_set_robust_list
12336     case TARGET_NR_set_robust_list:
12337     case TARGET_NR_get_robust_list:
12338         /* The ABI for supporting robust futexes has userspace pass
12339          * the kernel a pointer to a linked list which is updated by
12340          * userspace after the syscall; the list is walked by the kernel
12341          * when the thread exits. Since the linked list in QEMU guest
12342          * memory isn't a valid linked list for the host and we have
12343          * no way to reliably intercept the thread-death event, we can't
12344          * support these. Silently return ENOSYS so that guest userspace
12345          * falls back to a non-robust futex implementation (which should
12346          * be OK except in the corner case of the guest crashing while
12347          * holding a mutex that is shared with another process via
12348          * shared memory).
12349          */
12350         return -TARGET_ENOSYS;
12351 #endif
12352 
12353 #if defined(TARGET_NR_utimensat)
12354     case TARGET_NR_utimensat:
12355         {
12356             struct timespec *tsp, ts[2];
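            /*
             * A NULL times pointer means "set both timestamps to the current
             * time"; otherwise ts[0] is the access time and ts[1] the
             * modification time.
             */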
12357             if (!arg3) {
12358                 tsp = NULL;
12359             } else {
12360                 if (target_to_host_timespec(ts, arg3)) {
12361                     return -TARGET_EFAULT;
12362                 }
12363                 if (target_to_host_timespec(ts + 1, arg3 +
12364                                             sizeof(struct target_timespec))) {
12365                     return -TARGET_EFAULT;
12366                 }
12367                 tsp = ts;
12368             }
12369             if (!arg2) {
12370                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12371             } else {
12372                 if (!(p = lock_user_string(arg2))) {
12373                     return -TARGET_EFAULT;
12374                 }
12375                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12376                 unlock_user(p, arg2, 0);
12377             }
12378         }
12379         return ret;
12380 #endif
12381 #ifdef TARGET_NR_utimensat_time64
12382     case TARGET_NR_utimensat_time64:
12383         {
12384             struct timespec *tsp, ts[2];
12385             if (!arg3) {
12386                 tsp = NULL;
12387             } else {
12388                 if (target_to_host_timespec64(ts, arg3)) {
12389                     return -TARGET_EFAULT;
12390                 }
12391                 if (target_to_host_timespec64(ts + 1, arg3 +
12392                                      sizeof(struct target__kernel_timespec))) {
12393                     return -TARGET_EFAULT;
12394                 }
12395                 tsp = ts;
12396             }
12397             if (!arg2) {
12398                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12399             } else {
12400                 p = lock_user_string(arg2);
12401                 if (!p) {
12402                     return -TARGET_EFAULT;
12403                 }
12404                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12405                 unlock_user(p, arg2, 0);
12406             }
12407         }
12408         return ret;
12409 #endif
12410 #ifdef TARGET_NR_futex
12411     case TARGET_NR_futex:
12412         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12413 #endif
12414 #ifdef TARGET_NR_futex_time64
12415     case TARGET_NR_futex_time64:
12416         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12417 #endif
12418 #ifdef CONFIG_INOTIFY
12419 #if defined(TARGET_NR_inotify_init)
12420     case TARGET_NR_inotify_init:
12421         ret = get_errno(inotify_init());
12422         if (ret >= 0) {
12423             fd_trans_register(ret, &target_inotify_trans);
12424         }
12425         return ret;
12426 #endif
12427 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12428     case TARGET_NR_inotify_init1:
12429         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12430                                           fcntl_flags_tbl)));
12431         if (ret >= 0) {
12432             fd_trans_register(ret, &target_inotify_trans);
12433         }
12434         return ret;
12435 #endif
12436 #if defined(TARGET_NR_inotify_add_watch)
12437     case TARGET_NR_inotify_add_watch:
12438         p = lock_user_string(arg2);
12439         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12440         unlock_user(p, arg2, 0);
12441         return ret;
12442 #endif
12443 #if defined(TARGET_NR_inotify_rm_watch)
12444     case TARGET_NR_inotify_rm_watch:
12445         return get_errno(inotify_rm_watch(arg1, arg2));
12446 #endif
12447 #endif
12448 
12449 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12450     case TARGET_NR_mq_open:
12451         {
12452             struct mq_attr posix_mq_attr;
12453             struct mq_attr *pposix_mq_attr;
12454             int host_flags;
12455 
12456             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12457             pposix_mq_attr = NULL;
12458             if (arg4) {
12459                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12460                     return -TARGET_EFAULT;
12461                 }
12462                 pposix_mq_attr = &posix_mq_attr;
12463             }
12464             p = lock_user_string(arg1 - 1);
12465             if (!p) {
12466                 return -TARGET_EFAULT;
12467             }
12468             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12469             unlock_user(p, arg1, 0);
12470         }
12471         return ret;
12472 
12473     case TARGET_NR_mq_unlink:
12474         p = lock_user_string(arg1 - 1);
12475         if (!p) {
12476             return -TARGET_EFAULT;
12477         }
12478         ret = get_errno(mq_unlink(p));
12479         unlock_user(p, arg1, 0);
12480         return ret;
12481 
12482 #ifdef TARGET_NR_mq_timedsend
12483     case TARGET_NR_mq_timedsend:
12484         {
12485             struct timespec ts;
12486 
12487             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12488             if (arg5 != 0) {
12489                 if (target_to_host_timespec(&ts, arg5)) {
12490                     return -TARGET_EFAULT;
12491                 }
12492                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12493                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12494                     return -TARGET_EFAULT;
12495                 }
12496             } else {
12497                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12498             }
12499             unlock_user(p, arg2, arg3);
12500         }
12501         return ret;
12502 #endif
12503 #ifdef TARGET_NR_mq_timedsend_time64
12504     case TARGET_NR_mq_timedsend_time64:
12505         {
12506             struct timespec ts;
12507 
12508             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12509             if (arg5 != 0) {
12510                 if (target_to_host_timespec64(&ts, arg5)) {
12511                     return -TARGET_EFAULT;
12512                 }
12513                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12514                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12515                     return -TARGET_EFAULT;
12516                 }
12517             } else {
12518                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12519             }
12520             unlock_user(p, arg2, arg3);
12521         }
12522         return ret;
12523 #endif
12524 
12525 #ifdef TARGET_NR_mq_timedreceive
12526     case TARGET_NR_mq_timedreceive:
12527         {
12528             struct timespec ts;
12529             unsigned int prio;
12530 
12531             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12532             if (arg5 != 0) {
12533                 if (target_to_host_timespec(&ts, arg5)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12537                                                      &prio, &ts));
12538                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             } else {
12542                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12543                                                      &prio, NULL));
12544             }
12545             unlock_user(p, arg2, arg3);
12546             if (arg4 != 0)
12547                 put_user_u32(prio, arg4);
12548         }
12549         return ret;
12550 #endif
12551 #ifdef TARGET_NR_mq_timedreceive_time64
12552     case TARGET_NR_mq_timedreceive_time64:
12553         {
12554             struct timespec ts;
12555             unsigned int prio;
12556 
12557             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12558             if (arg5 != 0) {
12559                 if (target_to_host_timespec64(&ts, arg5)) {
12560                     return -TARGET_EFAULT;
12561                 }
12562                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12563                                                      &prio, &ts));
12564                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12565                     return -TARGET_EFAULT;
12566                 }
12567             } else {
12568                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12569                                                      &prio, NULL));
12570             }
12571             unlock_user(p, arg2, arg3);
12572             if (arg4 != 0) {
12573                 put_user_u32(prio, arg4);
12574             }
12575         }
12576         return ret;
12577 #endif
12578 
12579     /* Not implemented for now... */
12580 /*     case TARGET_NR_mq_notify: */
12581 /*         break; */
12582 
12583     case TARGET_NR_mq_getsetattr:
12584         {
12585             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
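            /*
             * arg2 points to the new attributes (NULL to only query) and
             * arg3, if non-NULL, receives the previous attributes.
             */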
12586             ret = 0;
12587             if (arg2 != 0) {
12588                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12589                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12590                                            &posix_mq_attr_out));
12591             } else if (arg3 != 0) {
12592                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12593             }
12594             if (ret == 0 && arg3 != 0) {
12595                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12596             }
12597         }
12598         return ret;
12599 #endif
12600 
12601 #ifdef CONFIG_SPLICE
12602 #ifdef TARGET_NR_tee
12603     case TARGET_NR_tee:
12604         {
12605             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12606         }
12607         return ret;
12608 #endif
12609 #ifdef TARGET_NR_splice
12610     case TARGET_NR_splice:
12611         {
12612             loff_t loff_in, loff_out;
12613             loff_t *ploff_in = NULL, *ploff_out = NULL;
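            /*
             * The in/out offsets are optional; when present they are read
             * from guest memory and written back after the host splice().
             */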
12614             if (arg2) {
12615                 if (get_user_u64(loff_in, arg2)) {
12616                     return -TARGET_EFAULT;
12617                 }
12618                 ploff_in = &loff_in;
12619             }
12620             if (arg4) {
12621                 if (get_user_u64(loff_out, arg4)) {
12622                     return -TARGET_EFAULT;
12623                 }
12624                 ploff_out = &loff_out;
12625             }
12626             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12627             if (arg2) {
12628                 if (put_user_u64(loff_in, arg2)) {
12629                     return -TARGET_EFAULT;
12630                 }
12631             }
12632             if (arg4) {
12633                 if (put_user_u64(loff_out, arg4)) {
12634                     return -TARGET_EFAULT;
12635                 }
12636             }
12637         }
12638         return ret;
12639 #endif
12640 #ifdef TARGET_NR_vmsplice
12641     case TARGET_NR_vmsplice:
12642         {
12643             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12644             if (vec != NULL) {
12645                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12646                 unlock_iovec(vec, arg2, arg3, 0);
12647             } else {
12648                 ret = -host_to_target_errno(errno);
12649             }
12650         }
12651         return ret;
12652 #endif
12653 #endif /* CONFIG_SPLICE */
12654 #ifdef CONFIG_EVENTFD
12655 #if defined(TARGET_NR_eventfd)
12656     case TARGET_NR_eventfd:
12657         ret = get_errno(eventfd(arg1, 0));
12658         if (ret >= 0) {
12659             fd_trans_register(ret, &target_eventfd_trans);
12660         }
12661         return ret;
12662 #endif
12663 #if defined(TARGET_NR_eventfd2)
12664     case TARGET_NR_eventfd2:
12665     {
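        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC, which may differ between target and host, so
         * translate those two bits by hand.
         */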
12666         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12667         if (arg2 & TARGET_O_NONBLOCK) {
12668             host_flags |= O_NONBLOCK;
12669         }
12670         if (arg2 & TARGET_O_CLOEXEC) {
12671             host_flags |= O_CLOEXEC;
12672         }
12673         ret = get_errno(eventfd(arg1, host_flags));
12674         if (ret >= 0) {
12675             fd_trans_register(ret, &target_eventfd_trans);
12676         }
12677         return ret;
12678     }
12679 #endif
12680 #endif /* CONFIG_EVENTFD  */
12681 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12682     case TARGET_NR_fallocate:
12683 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
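        /* 32-bit ABIs pass the 64-bit offset and length split across two
         * registers each. */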
12684         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12685                                   target_offset64(arg5, arg6)));
12686 #else
12687         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12688 #endif
12689         return ret;
12690 #endif
12691 #if defined(CONFIG_SYNC_FILE_RANGE)
12692 #if defined(TARGET_NR_sync_file_range)
12693     case TARGET_NR_sync_file_range:
12694 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12695 #if defined(TARGET_MIPS)
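        /* MIPS o32 aligns 64-bit arguments to even register pairs, so a
         * padding slot shifts everything up by one and flags land in arg7. */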
12696         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12697                                         target_offset64(arg5, arg6), arg7));
12698 #else
12699         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12700                                         target_offset64(arg4, arg5), arg6));
12701 #endif /* !TARGET_MIPS */
12702 #else
12703         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12704 #endif
12705         return ret;
12706 #endif
12707 #if defined(TARGET_NR_sync_file_range2) || \
12708     defined(TARGET_NR_arm_sync_file_range)
12709 #if defined(TARGET_NR_sync_file_range2)
12710     case TARGET_NR_sync_file_range2:
12711 #endif
12712 #if defined(TARGET_NR_arm_sync_file_range)
12713     case TARGET_NR_arm_sync_file_range:
12714 #endif
12715         /* This is like sync_file_range but the arguments are reordered */
12716 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12717         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12718                                         target_offset64(arg5, arg6), arg2));
12719 #else
12720         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12721 #endif
12722         return ret;
12723 #endif
12724 #endif
12725 #if defined(TARGET_NR_signalfd4)
12726     case TARGET_NR_signalfd4:
12727         return do_signalfd4(arg1, arg2, arg4);
12728 #endif
12729 #if defined(TARGET_NR_signalfd)
12730     case TARGET_NR_signalfd:
12731         return do_signalfd4(arg1, arg2, 0);
12732 #endif
12733 #if defined(CONFIG_EPOLL)
12734 #if defined(TARGET_NR_epoll_create)
12735     case TARGET_NR_epoll_create:
12736         return get_errno(epoll_create(arg1));
12737 #endif
12738 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12739     case TARGET_NR_epoll_create1:
12740         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12741 #endif
12742 #if defined(TARGET_NR_epoll_ctl)
12743     case TARGET_NR_epoll_ctl:
12744     {
12745         struct epoll_event ep;
12746         struct epoll_event *epp = 0;
12747         if (arg4) {
12748             if (arg2 != EPOLL_CTL_DEL) {
12749                 struct target_epoll_event *target_ep;
12750                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12751                     return -TARGET_EFAULT;
12752                 }
12753                 ep.events = tswap32(target_ep->events);
12754                 /*
12755                  * The epoll_data_t union is just opaque data to the kernel,
12756                  * so we transfer all 64 bits across and need not worry what
12757                  * actual data type it is.
12758                  */
12759                 ep.data.u64 = tswap64(target_ep->data.u64);
12760                 unlock_user_struct(target_ep, arg4, 0);
12761             }
12762             /*
12763              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12764              * non-null pointer even though the argument is ignored, so
12765              * always pass the host a valid epoll_event pointer.
12766              */
12767             epp = &ep;
12768         }
12769         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12770     }
12771 #endif
12772 
12773 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12774 #if defined(TARGET_NR_epoll_wait)
12775     case TARGET_NR_epoll_wait:
12776 #endif
12777 #if defined(TARGET_NR_epoll_pwait)
12778     case TARGET_NR_epoll_pwait:
12779 #endif
12780     {
12781         struct target_epoll_event *target_ep;
12782         struct epoll_event *ep;
12783         int epfd = arg1;
12784         int maxevents = arg3;
12785         int timeout = arg4;
12786 
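        /*
         * Bound maxevents as the kernel does, so that the bounce buffer
         * size calculations below cannot overflow.
         */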
12787         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12788             return -TARGET_EINVAL;
12789         }
12790 
12791         target_ep = lock_user(VERIFY_WRITE, arg2,
12792                               maxevents * sizeof(struct target_epoll_event), 1);
12793         if (!target_ep) {
12794             return -TARGET_EFAULT;
12795         }
12796 
12797         ep = g_try_new(struct epoll_event, maxevents);
12798         if (!ep) {
12799             unlock_user(target_ep, arg2, 0);
12800             return -TARGET_ENOMEM;
12801         }
12802 
12803         switch (num) {
12804 #if defined(TARGET_NR_epoll_pwait)
12805         case TARGET_NR_epoll_pwait:
12806         {
12807             sigset_t *set = NULL;
12808 
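            /*
             * Translate the guest's signal mask so the host can install it
             * atomically for the duration of the wait.
             */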
12809             if (arg5) {
12810                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12811                 if (ret != 0) {
12812                     break;
12813                 }
12814             }
12815 
12816             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12817                                              set, SIGSET_T_SIZE));
12818 
12819             if (set) {
12820                 finish_sigsuspend_mask(ret);
12821             }
12822             break;
12823         }
12824 #endif
12825 #if defined(TARGET_NR_epoll_wait)
12826         case TARGET_NR_epoll_wait:
12827             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12828                                              NULL, 0));
12829             break;
12830 #endif
12831         default:
12832             ret = -TARGET_ENOSYS;
12833         }
12834         if (!is_error(ret)) {
12835             int i;
12836             for (i = 0; i < ret; i++) {
12837                 target_ep[i].events = tswap32(ep[i].events);
12838                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12839             }
12840             unlock_user(target_ep, arg2,
12841                         ret * sizeof(struct target_epoll_event));
12842         } else {
12843             unlock_user(target_ep, arg2, 0);
12844         }
12845         g_free(ep);
12846         return ret;
12847     }
12848 #endif
12849 #endif
12850 #ifdef TARGET_NR_prlimit64
12851     case TARGET_NR_prlimit64:
12852     {
12853         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12854         struct target_rlimit64 *target_rnew, *target_rold;
12855         struct host_rlimit64 rnew, rold, *rnewp = 0;
12856         int resource = target_to_host_resource(arg2);
12857 
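        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
         * passed on, since they would also constrain QEMU itself; only the
         * old values are reported for those resources.
         */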
12858         if (arg3 && (resource != RLIMIT_AS &&
12859                      resource != RLIMIT_DATA &&
12860                      resource != RLIMIT_STACK)) {
12861             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12862                 return -TARGET_EFAULT;
12863             }
12864             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12865             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12866             unlock_user_struct(target_rnew, arg3, 0);
12867             rnewp = &rnew;
12868         }
12869 
12870         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12871         if (!is_error(ret) && arg4) {
12872             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12873                 return -TARGET_EFAULT;
12874             }
12875             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12876             target_rold->rlim_max = tswap64(rold.rlim_max);
12877             unlock_user_struct(target_rold, arg4, 1);
12878         }
12879         return ret;
12880     }
12881 #endif
12882 #ifdef TARGET_NR_gethostname
12883     case TARGET_NR_gethostname:
12884     {
12885         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12886         if (name) {
12887             ret = get_errno(gethostname(name, arg2));
12888             unlock_user(name, arg1, arg2);
12889         } else {
12890             ret = -TARGET_EFAULT;
12891         }
12892         return ret;
12893     }
12894 #endif
12895 #ifdef TARGET_NR_atomic_cmpxchg_32
12896     case TARGET_NR_atomic_cmpxchg_32:
12897     {
12898         /* should use start_exclusive from main.c */
12899         abi_ulong mem_value;
12900         if (get_user_u32(mem_value, arg6)) {
12901             target_siginfo_t info;
12902             info.si_signo = SIGSEGV;
12903             info.si_errno = 0;
12904             info.si_code = TARGET_SEGV_MAPERR;
12905             info._sifields._sigfault._addr = arg6;
12906             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12907             ret = 0xdeadbeef;
12908 
12909         }
12910         if (mem_value == arg2)
12911             put_user_u32(arg1, arg6);
12912         return mem_value;
12913     }
12914 #endif
12915 #ifdef TARGET_NR_atomic_barrier
12916     case TARGET_NR_atomic_barrier:
12917         /* Like the kernel implementation and the QEMU ARM barrier
12918            helper, implement this as a no-op. */
12919         return 0;
12920 #endif
12921 
12922 #ifdef TARGET_NR_timer_create
12923     case TARGET_NR_timer_create:
12924     {
12925         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12926 
12927         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12928 
12929         int clkid = arg1;
12930         int timer_index = next_free_host_timer();
12931 
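        /*
         * The id handed back to the guest is the index into g_posix_timers
         * tagged with TIMER_MAGIC; get_timer_id() validates it on later
         * timer_* calls.
         */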
12932         if (timer_index < 0) {
12933             ret = -TARGET_EAGAIN;
12934         } else {
12935             timer_t *phtimer = g_posix_timers + timer_index;
12936 
12937             if (arg2) {
12938                 phost_sevp = &host_sevp;
12939                 ret = target_to_host_sigevent(phost_sevp, arg2);
12940                 if (ret != 0) {
12941                     free_host_timer_slot(timer_index);
12942                     return ret;
12943                 }
12944             }
12945 
12946             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12947             if (ret) {
12948                 free_host_timer_slot(timer_index);
12949             } else {
12950                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12951                     timer_delete(*phtimer);
12952                     free_host_timer_slot(timer_index);
12953                     return -TARGET_EFAULT;
12954                 }
12955             }
12956         }
12957         return ret;
12958     }
12959 #endif
12960 
12961 #ifdef TARGET_NR_timer_settime
12962     case TARGET_NR_timer_settime:
12963     {
12964         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12965          * struct itimerspec * old_value */
12966         target_timer_t timerid = get_timer_id(arg1);
12967 
12968         if (timerid < 0) {
12969             ret = timerid;
12970         } else if (arg3 == 0) {
12971             ret = -TARGET_EINVAL;
12972         } else {
12973             timer_t htimer = g_posix_timers[timerid];
12974             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12975 
12976             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12977                 return -TARGET_EFAULT;
12978             }
12979             ret = get_errno(
12980                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12981             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12982                 return -TARGET_EFAULT;
12983             }
12984         }
12985         return ret;
12986     }
12987 #endif
12988 
12989 #ifdef TARGET_NR_timer_settime64
12990     case TARGET_NR_timer_settime64:
12991     {
12992         target_timer_t timerid = get_timer_id(arg1);
12993 
12994         if (timerid < 0) {
12995             ret = timerid;
12996         } else if (arg3 == 0) {
12997             ret = -TARGET_EINVAL;
12998         } else {
12999             timer_t htimer = g_posix_timers[timerid];
13000             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13001 
13002             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13003                 return -TARGET_EFAULT;
13004             }
13005             ret = get_errno(
13006                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13007             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13008                 return -TARGET_EFAULT;
13009             }
13010         }
13011         return ret;
13012     }
13013 #endif
13014 
13015 #ifdef TARGET_NR_timer_gettime
13016     case TARGET_NR_timer_gettime:
13017     {
13018         /* args: timer_t timerid, struct itimerspec *curr_value */
13019         target_timer_t timerid = get_timer_id(arg1);
13020 
13021         if (timerid < 0) {
13022             ret = timerid;
13023         } else if (!arg2) {
13024             ret = -TARGET_EFAULT;
13025         } else {
13026             timer_t htimer = g_posix_timers[timerid];
13027             struct itimerspec hspec;
13028             ret = get_errno(timer_gettime(htimer, &hspec));
13029 
13030             if (host_to_target_itimerspec(arg2, &hspec)) {
13031                 ret = -TARGET_EFAULT;
13032             }
13033         }
13034         return ret;
13035     }
13036 #endif
13037 
13038 #ifdef TARGET_NR_timer_gettime64
13039     case TARGET_NR_timer_gettime64:
13040     {
13041         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13042         target_timer_t timerid = get_timer_id(arg1);
13043 
13044         if (timerid < 0) {
13045             ret = timerid;
13046         } else if (!arg2) {
13047             ret = -TARGET_EFAULT;
13048         } else {
13049             timer_t htimer = g_posix_timers[timerid];
13050             struct itimerspec hspec;
13051             ret = get_errno(timer_gettime(htimer, &hspec));
13052 
13053             if (host_to_target_itimerspec64(arg2, &hspec)) {
13054                 ret = -TARGET_EFAULT;
13055             }
13056         }
13057         return ret;
13058     }
13059 #endif
13060 
13061 #ifdef TARGET_NR_timer_getoverrun
13062     case TARGET_NR_timer_getoverrun:
13063     {
13064         /* args: timer_t timerid */
13065         target_timer_t timerid = get_timer_id(arg1);
13066 
13067         if (timerid < 0) {
13068             ret = timerid;
13069         } else {
13070             timer_t htimer = g_posix_timers[timerid];
13071             ret = get_errno(timer_getoverrun(htimer));
13072         }
13073         return ret;
13074     }
13075 #endif
13076 
13077 #ifdef TARGET_NR_timer_delete
13078     case TARGET_NR_timer_delete:
13079     {
13080         /* args: timer_t timerid */
13081         target_timer_t timerid = get_timer_id(arg1);
13082 
13083         if (timerid < 0) {
13084             ret = timerid;
13085         } else {
13086             timer_t htimer = g_posix_timers[timerid];
13087             ret = get_errno(timer_delete(htimer));
13088             free_host_timer_slot(timerid);
13089         }
13090         return ret;
13091     }
13092 #endif
13093 
13094 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13095     case TARGET_NR_timerfd_create:
13096         return get_errno(timerfd_create(arg1,
13097                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13098 #endif
13099 
13100 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13101     case TARGET_NR_timerfd_gettime:
13102         {
13103             struct itimerspec its_curr;
13104 
13105             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13106 
13107             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13108                 return -TARGET_EFAULT;
13109             }
13110         }
13111         return ret;
13112 #endif
13113 
13114 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13115     case TARGET_NR_timerfd_gettime64:
13116         {
13117             struct itimerspec its_curr;
13118 
13119             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13120 
13121             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13122                 return -TARGET_EFAULT;
13123             }
13124         }
13125         return ret;
13126 #endif
13127 
13128 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13129     case TARGET_NR_timerfd_settime:
13130         {
13131             struct itimerspec its_new, its_old, *p_new;
13132 
13133             if (arg3) {
13134                 if (target_to_host_itimerspec(&its_new, arg3)) {
13135                     return -TARGET_EFAULT;
13136                 }
13137                 p_new = &its_new;
13138             } else {
13139                 p_new = NULL;
13140             }
13141 
13142             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13143 
13144             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13145                 return -TARGET_EFAULT;
13146             }
13147         }
13148         return ret;
13149 #endif
13150 
13151 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13152     case TARGET_NR_timerfd_settime64:
13153         {
13154             struct itimerspec its_new, its_old, *p_new;
13155 
13156             if (arg3) {
13157                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13158                     return -TARGET_EFAULT;
13159                 }
13160                 p_new = &its_new;
13161             } else {
13162                 p_new = NULL;
13163             }
13164 
13165             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13166 
13167             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13168                 return -TARGET_EFAULT;
13169             }
13170         }
13171         return ret;
13172 #endif
13173 
13174 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13175     case TARGET_NR_ioprio_get:
13176         return get_errno(ioprio_get(arg1, arg2));
13177 #endif
13178 
13179 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13180     case TARGET_NR_ioprio_set:
13181         return get_errno(ioprio_set(arg1, arg2, arg3));
13182 #endif
13183 
13184 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13185     case TARGET_NR_setns:
13186         return get_errno(setns(arg1, arg2));
13187 #endif
13188 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13189     case TARGET_NR_unshare:
13190         return get_errno(unshare(arg1));
13191 #endif
13192 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13193     case TARGET_NR_kcmp:
13194         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13195 #endif
13196 #ifdef TARGET_NR_swapcontext
13197     case TARGET_NR_swapcontext:
13198         /* PowerPC specific.  */
13199         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13200 #endif
13201 #ifdef TARGET_NR_memfd_create
13202     case TARGET_NR_memfd_create:
13203         p = lock_user_string(arg1);
13204         if (!p) {
13205             return -TARGET_EFAULT;
13206         }
13207         ret = get_errno(memfd_create(p, arg2));
13208         fd_trans_unregister(ret);
13209         unlock_user(p, arg1, 0);
13210         return ret;
13211 #endif
13212 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13213     case TARGET_NR_membarrier:
13214         return get_errno(membarrier(arg1, arg2));
13215 #endif
13216 
13217 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13218     case TARGET_NR_copy_file_range:
13219         {
13220             loff_t inoff, outoff;
13221             loff_t *pinoff = NULL, *poutoff = NULL;
13222 
13223             if (arg2) {
13224                 if (get_user_u64(inoff, arg2)) {
13225                     return -TARGET_EFAULT;
13226                 }
13227                 pinoff = &inoff;
13228             }
13229             if (arg4) {
13230                 if (get_user_u64(outoff, arg4)) {
13231                     return -TARGET_EFAULT;
13232                 }
13233                 poutoff = &outoff;
13234             }
13235             /* Do not sign-extend the count parameter. */
13236             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13237                                                  (abi_ulong)arg5, arg6));
13238             if (!is_error(ret) && ret > 0) {
13239                 if (arg2) {
13240                     if (put_user_u64(inoff, arg2)) {
13241                         return -TARGET_EFAULT;
13242                     }
13243                 }
13244                 if (arg4) {
13245                     if (put_user_u64(outoff, arg4)) {
13246                         return -TARGET_EFAULT;
13247                     }
13248                 }
13249             }
13250         }
13251         return ret;
13252 #endif
13253 
13254 #if defined(TARGET_NR_pivot_root)
13255     case TARGET_NR_pivot_root:
13256         {
13257             void *p2;
13258             p = lock_user_string(arg1); /* new_root */
13259             p2 = lock_user_string(arg2); /* put_old */
13260             if (!p || !p2) {
13261                 ret = -TARGET_EFAULT;
13262             } else {
13263                 ret = get_errno(pivot_root(p, p2));
13264             }
13265             unlock_user(p2, arg2, 0);
13266             unlock_user(p, arg1, 0);
13267         }
13268         return ret;
13269 #endif
13270 
13271     default:
13272         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13273         return -TARGET_ENOSYS;
13274     }
13275     return ret;
13276 }
13277 
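/*
 * Top-level syscall entry point: record the syscall for tracing, emit
 * -strace output when enabled, dispatch to do_syscall1() and log the
 * result before returning it to the caller.
 */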
13278 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13279                     abi_long arg2, abi_long arg3, abi_long arg4,
13280                     abi_long arg5, abi_long arg6, abi_long arg7,
13281                     abi_long arg8)
13282 {
13283     CPUState *cpu = env_cpu(cpu_env);
13284     abi_long ret;
13285 
13286 #ifdef DEBUG_ERESTARTSYS
13287     /* Debug-only code for exercising the syscall-restart code paths
13288      * in the per-architecture cpu main loops: restart every syscall
13289      * the guest makes once before letting it through.
13290      */
13291     {
13292         static bool flag;
13293         flag = !flag;
13294         if (flag) {
13295             return -QEMU_ERESTARTSYS;
13296         }
13297     }
13298 #endif
13299 
13300     record_syscall_start(cpu, num, arg1,
13301                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13302 
13303     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13304         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13305     }
13306 
13307     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13308                       arg5, arg6, arg7, arg8);
13309 
13310     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13311         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13312                           arg3, arg4, arg5, arg6);
13313     }
13314 
13315     record_syscall_return(cpu, num, ret);
13316     return ret;
13317 }
13318