xref: /openbmc/qemu/linux-user/syscall.c (revision fe080593)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "target_mman.h"
26 #include <elf.h>
27 #include <endian.h>
28 #include <grp.h>
29 #include <sys/ipc.h>
30 #include <sys/msg.h>
31 #include <sys/wait.h>
32 #include <sys/mount.h>
33 #include <sys/file.h>
34 #include <sys/fsuid.h>
35 #include <sys/personality.h>
36 #include <sys/prctl.h>
37 #include <sys/resource.h>
38 #include <sys/swap.h>
39 #include <linux/capability.h>
40 #include <sched.h>
41 #include <sys/timex.h>
42 #include <sys/socket.h>
43 #include <linux/sockios.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/in.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <netinet/udp.h>
59 #include <linux/wireless.h>
60 #include <linux/icmp.h>
61 #include <linux/icmpv6.h>
62 #include <linux/if_tun.h>
63 #include <linux/in6.h>
64 #include <linux/errqueue.h>
65 #include <linux/random.h>
66 #ifdef CONFIG_TIMERFD
67 #include <sys/timerfd.h>
68 #endif
69 #ifdef CONFIG_EVENTFD
70 #include <sys/eventfd.h>
71 #endif
72 #ifdef CONFIG_EPOLL
73 #include <sys/epoll.h>
74 #endif
75 #ifdef CONFIG_ATTR
76 #include "qemu/xattr.h"
77 #endif
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
80 #endif
81 #ifdef HAVE_SYS_KCOV_H
82 #include <sys/kcov.h>
83 #endif
84 
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91 
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
97 #include <linux/kd.h>
98 #include <linux/mtio.h>
99 #include <linux/fs.h>
100 #include <linux/fd.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #if defined(CONFIG_USBFS)
106 #include <linux/usbdevice_fs.h>
107 #include <linux/usb/ch9.h>
108 #endif
109 #include <linux/vt.h>
110 #include <linux/dm-ioctl.h>
111 #include <linux/reboot.h>
112 #include <linux/route.h>
113 #include <linux/filter.h>
114 #include <linux/blkpg.h>
115 #include <netpacket/packet.h>
116 #include <linux/netlink.h>
117 #include <linux/if_alg.h>
118 #include <linux/rtc.h>
119 #include <sound/asound.h>
120 #ifdef HAVE_BTRFS_H
121 #include <linux/btrfs.h>
122 #endif
123 #ifdef HAVE_DRM_H
124 #include <libdrm/drm.h>
125 #include <libdrm/i915_drm.h>
126 #endif
127 #include "linux_loop.h"
128 #include "uname.h"
129 
130 #include "qemu.h"
131 #include "user-internals.h"
132 #include "strace.h"
133 #include "signal-common.h"
134 #include "loader.h"
135 #include "user-mmap.h"
136 #include "user/safe-syscall.h"
137 #include "qemu/guest-random.h"
138 #include "qemu/selfmap.h"
139 #include "user/syscall-trace.h"
140 #include "special-errno.h"
141 #include "qapi/error.h"
142 #include "fd-trans.h"
143 #include "tcg/tcg.h"
144 #include "cpu_loop-common.h"
145 
146 #ifndef CLONE_IO
147 #define CLONE_IO                0x80000000      /* Clone io context */
148 #endif
149 
150 /* We can't directly call the host clone syscall, because this will
151  * badly confuse libc (breaking mutexes, for example). So we must
152  * divide clone flags into:
153  *  * flag combinations that look like pthread_create()
154  *  * flag combinations that look like fork()
155  *  * flags we can implement within QEMU itself
156  *  * flags we can't support and will return an error for
157  */
158 /* For thread creation, all these flags must be present; for
159  * fork, none must be present.
160  */
161 #define CLONE_THREAD_FLAGS                              \
162     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
163      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
164 
165 /* These flags are ignored:
166  * CLONE_DETACHED is now ignored by the kernel;
167  * CLONE_IO is just an optimisation hint to the I/O scheduler
168  */
169 #define CLONE_IGNORED_FLAGS                     \
170     (CLONE_DETACHED | CLONE_IO)
171 
172 /* Flags for fork which we can implement within QEMU itself */
173 #define CLONE_OPTIONAL_FORK_FLAGS               \
174     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
175      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
176 
177 /* Flags for thread creation which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
181 
182 #define CLONE_INVALID_FORK_FLAGS                                        \
183     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
184 
185 #define CLONE_INVALID_THREAD_FLAGS                                      \
186     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
187        CLONE_IGNORED_FLAGS))
188 
189 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
190  * have almost all been allocated. We cannot support any of
191  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
192  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
193  * The checks against the invalid thread masks above will catch these.
194  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
195  */
196 
197 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
198  * once. This exercises the codepaths for restart.
199  */
200 //#define DEBUG_ERESTARTSYS
201 
202 //#include <linux/msdos_fs.h>
203 #define VFAT_IOCTL_READDIR_BOTH \
204     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
205 #define VFAT_IOCTL_READDIR_SHORT \
206     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
207 
208 #undef _syscall0
209 #undef _syscall1
210 #undef _syscall2
211 #undef _syscall3
212 #undef _syscall4
213 #undef _syscall5
214 #undef _syscall6
215 
216 #define _syscall0(type,name)		\
217 static type name (void)			\
218 {					\
219 	return syscall(__NR_##name);	\
220 }
221 
222 #define _syscall1(type,name,type1,arg1)		\
223 static type name (type1 arg1)			\
224 {						\
225 	return syscall(__NR_##name, arg1);	\
226 }
227 
228 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
229 static type name (type1 arg1,type2 arg2)		\
230 {							\
231 	return syscall(__NR_##name, arg1, arg2);	\
232 }
233 
234 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
235 static type name (type1 arg1,type2 arg2,type3 arg3)		\
236 {								\
237 	return syscall(__NR_##name, arg1, arg2, arg3);		\
238 }
239 
240 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
242 {										\
243 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
244 }
245 
246 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
247 		  type5,arg5)							\
248 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
249 {										\
250 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
251 }
252 
253 
254 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
255 		  type5,arg5,type6,arg6)					\
256 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
257                   type6 arg6)							\
258 {										\
259 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
260 }
261 
262 
263 #define __NR_sys_uname __NR_uname
264 #define __NR_sys_getcwd1 __NR_getcwd
265 #define __NR_sys_getdents __NR_getdents
266 #define __NR_sys_getdents64 __NR_getdents64
267 #define __NR_sys_getpriority __NR_getpriority
268 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
269 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
270 #define __NR_sys_syslog __NR_syslog
271 #if defined(__NR_futex)
272 # define __NR_sys_futex __NR_futex
273 #endif
274 #if defined(__NR_futex_time64)
275 # define __NR_sys_futex_time64 __NR_futex_time64
276 #endif
277 #define __NR_sys_statx __NR_statx
278 
279 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
280 #define __NR__llseek __NR_lseek
281 #endif
282 
283 /* Newer kernel ports have llseek() instead of _llseek() */
284 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
285 #define TARGET_NR__llseek TARGET_NR_llseek
286 #endif
287 
288 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
289 #ifndef TARGET_O_NONBLOCK_MASK
290 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
291 #endif
292 
293 #define __NR_sys_gettid __NR_gettid
294 _syscall0(int, sys_gettid)
295 
296 /* For the 64-bit guest on 32-bit host case we must emulate
297  * getdents using getdents64, because otherwise the host
298  * might hand us back more dirent records than we can fit
299  * into the guest buffer after structure format conversion.
300  * In other cases we emulate getdents with the host getdents if it is available.
301  */
302 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
303 #define EMULATE_GETDENTS_WITH_GETDENTS
304 #endif
305 
306 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
307 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
308 #endif
309 #if (defined(TARGET_NR_getdents) && \
310       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
311     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
312 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
313 #endif
314 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
315 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
316           loff_t *, res, uint, wh);
317 #endif
318 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
319 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
320           siginfo_t *, uinfo)
321 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
322 #ifdef __NR_exit_group
323 _syscall1(int,exit_group,int,error_code)
324 #endif
325 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
326 #define __NR_sys_close_range __NR_close_range
327 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
328 #ifndef CLOSE_RANGE_CLOEXEC
329 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
330 #endif
331 #endif
332 #if defined(__NR_futex)
333 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
334           const struct timespec *,timeout,int *,uaddr2,int,val3)
335 #endif
336 #if defined(__NR_futex_time64)
337 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
338           const struct timespec *,timeout,int *,uaddr2,int,val3)
339 #endif
340 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
341 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
342 #endif
343 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
344 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
345                              unsigned int, flags);
346 #endif
347 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
348 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
349 #endif
350 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
351 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
352           unsigned long *, user_mask_ptr);
353 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
354 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
355           unsigned long *, user_mask_ptr);
356 /* sched_attr is not defined in glibc */
357 struct sched_attr {
358     uint32_t size;
359     uint32_t sched_policy;
360     uint64_t sched_flags;
361     int32_t sched_nice;
362     uint32_t sched_priority;
363     uint64_t sched_runtime;
364     uint64_t sched_deadline;
365     uint64_t sched_period;
366     uint32_t sched_util_min;
367     uint32_t sched_util_max;
368 };
369 #define __NR_sys_sched_getattr __NR_sched_getattr
370 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
371           unsigned int, size, unsigned int, flags);
372 #define __NR_sys_sched_setattr __NR_sched_setattr
373 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
374           unsigned int, flags);
375 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
376 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
377 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
378 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
379           const struct sched_param *, param);
380 #define __NR_sys_sched_getparam __NR_sched_getparam
381 _syscall2(int, sys_sched_getparam, pid_t, pid,
382           struct sched_param *, param);
383 #define __NR_sys_sched_setparam __NR_sched_setparam
384 _syscall2(int, sys_sched_setparam, pid_t, pid,
385           const struct sched_param *, param);
386 #define __NR_sys_getcpu __NR_getcpu
387 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
388 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
389           void *, arg);
390 _syscall2(int, capget, struct __user_cap_header_struct *, header,
391           struct __user_cap_data_struct *, data);
392 _syscall2(int, capset, struct __user_cap_header_struct *, header,
393           struct __user_cap_data_struct *, data);
394 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
395 _syscall2(int, ioprio_get, int, which, int, who)
396 #endif
397 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
398 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
399 #endif
400 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
401 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
402 #endif
403 
404 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
405 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
406           unsigned long, idx1, unsigned long, idx2)
407 #endif
408 
409 /*
410  * It is assumed that struct statx is architecture independent.
411  */
412 #if defined(TARGET_NR_statx) && defined(__NR_statx)
413 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
414           unsigned int, mask, struct target_statx *, statxbuf)
415 #endif
416 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
417 _syscall2(int, membarrier, int, cmd, int, flags)
418 #endif
419 
420 static const bitmask_transtbl fcntl_flags_tbl[] = {
421   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
422   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
423   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
424   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
425   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
426   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
427   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
428   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
429   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
430   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
431   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
432   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
433   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
434 #if defined(O_DIRECT)
435   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
436 #endif
437 #if defined(O_NOATIME)
438   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
439 #endif
440 #if defined(O_CLOEXEC)
441   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
442 #endif
443 #if defined(O_PATH)
444   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
445 #endif
446 #if defined(O_TMPFILE)
447   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
448 #endif
449   /* Don't terminate the list prematurely on 64-bit host+guest.  */
450 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
451   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
452 #endif
453   { 0, 0, 0, 0 }
454 };
455 
456 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
457 
458 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
459 #if defined(__NR_utimensat)
460 #define __NR_sys_utimensat __NR_utimensat
461 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
462           const struct timespec *,tsp,int,flags)
463 #else
464 static int sys_utimensat(int dirfd, const char *pathname,
465                          const struct timespec times[2], int flags)
466 {
467     errno = ENOSYS;
468     return -1;
469 }
470 #endif
471 #endif /* TARGET_NR_utimensat */
472 
473 #ifdef TARGET_NR_renameat2
474 #if defined(__NR_renameat2)
475 #define __NR_sys_renameat2 __NR_renameat2
476 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
477           const char *, new, unsigned int, flags)
478 #else
479 static int sys_renameat2(int oldfd, const char *old,
480                          int newfd, const char *new, int flags)
481 {
482     if (flags == 0) {
483         return renameat(oldfd, old, newfd, new);
484     }
485     errno = ENOSYS;
486     return -1;
487 }
488 #endif
489 #endif /* TARGET_NR_renameat2 */
490 
491 #ifdef CONFIG_INOTIFY
492 #include <sys/inotify.h>
493 #else
494 /* Userspace can usually survive at runtime without inotify */
495 #undef TARGET_NR_inotify_init
496 #undef TARGET_NR_inotify_init1
497 #undef TARGET_NR_inotify_add_watch
498 #undef TARGET_NR_inotify_rm_watch
499 #endif /* CONFIG_INOTIFY  */
500 
501 #if defined(TARGET_NR_prlimit64)
502 #ifndef __NR_prlimit64
503 # define __NR_prlimit64 -1
504 #endif
505 #define __NR_sys_prlimit64 __NR_prlimit64
506 /* The glibc rlimit structure may not match the one used by the underlying syscall */
507 struct host_rlimit64 {
508     uint64_t rlim_cur;
509     uint64_t rlim_max;
510 };
511 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
512           const struct host_rlimit64 *, new_limit,
513           struct host_rlimit64 *, old_limit)
514 #endif
515 
516 
517 #if defined(TARGET_NR_timer_create)
518 /* Maximum of 32 active POSIX timers allowed at any one time. */
519 #define GUEST_TIMER_MAX 32
520 static timer_t g_posix_timers[GUEST_TIMER_MAX];
521 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
522 
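/* Atomically claim the first free POSIX timer slot; returns the slot
 * index, or -1 if all GUEST_TIMER_MAX slots are already in use.
 */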
523 static inline int next_free_host_timer(void)
524 {
525     int k;
526     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
527         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
528             return k;
529         }
530     }
531     return -1;
532 }
533 
534 static inline void free_host_timer_slot(int id)
535 {
536     qatomic_store_release(g_posix_timer_allocated + id, 0);
537 }
538 #endif
539 
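/* Translate errno values between host and target using the table in
 * errnos.c.inc; values without a table entry are passed through unchanged.
 */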
540 static inline int host_to_target_errno(int host_errno)
541 {
542     switch (host_errno) {
543 #define E(X)  case X: return TARGET_##X;
544 #include "errnos.c.inc"
545 #undef E
546     default:
547         return host_errno;
548     }
549 }
550 
551 static inline int target_to_host_errno(int target_errno)
552 {
553     switch (target_errno) {
554 #define E(X)  case TARGET_##X: return X;
555 #include "errnos.c.inc"
556 #undef E
557     default:
558         return target_errno;
559     }
560 }
561 
562 abi_long get_errno(abi_long ret)
563 {
564     if (ret == -1)
565         return -host_to_target_errno(errno);
566     else
567         return ret;
568 }
569 
570 const char *target_strerror(int err)
571 {
572     if (err == QEMU_ERESTARTSYS) {
573         return "To be restarted";
574     }
575     if (err == QEMU_ESIGRETURN) {
576         return "Successful exit from sigreturn";
577     }
578 
579     return strerror(target_to_host_errno(err));
580 }
581 
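/* Check that the guest memory from addr+ksize up to addr+usize is all
 * zeroes. Returns 1 if it is (or if usize <= ksize), 0 if a non-zero
 * byte is found, and -TARGET_EFAULT if the guest memory cannot be read.
 */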
582 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
583 {
584     int i;
585     uint8_t b;
586     if (usize <= ksize) {
587         return 1;
588     }
589     for (i = ksize; i < usize; i++) {
590         if (get_user_u8(b, addr + i)) {
591             return -TARGET_EFAULT;
592         }
593         if (b != 0) {
594             return 0;
595         }
596     }
597     return 1;
598 }
599 
600 #define safe_syscall0(type, name) \
601 static type safe_##name(void) \
602 { \
603     return safe_syscall(__NR_##name); \
604 }
605 
606 #define safe_syscall1(type, name, type1, arg1) \
607 static type safe_##name(type1 arg1) \
608 { \
609     return safe_syscall(__NR_##name, arg1); \
610 }
611 
612 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
613 static type safe_##name(type1 arg1, type2 arg2) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2); \
616 }
617 
618 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
619 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
620 { \
621     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
622 }
623 
624 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
625     type4, arg4) \
626 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
627 { \
628     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
629 }
630 
631 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
632     type4, arg4, type5, arg5) \
633 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
634     type5 arg5) \
635 { \
636     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
637 }
638 
639 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
640     type4, arg4, type5, arg5, type6, arg6) \
641 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
642     type5 arg5, type6 arg6) \
643 { \
644     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
645 }
646 
647 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
648 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
649 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
650               int, flags, mode_t, mode)
651 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
652 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
653               struct rusage *, rusage)
654 #endif
655 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
656               int, options, struct rusage *, rusage)
657 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
658               char **, argv, char **, envp, int, flags)
659 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
660     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
661 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
662               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
663 #endif
664 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
665 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
666               struct timespec *, tsp, const sigset_t *, sigmask,
667               size_t, sigsetsize)
668 #endif
669 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
670               int, maxevents, int, timeout, const sigset_t *, sigmask,
671               size_t, sigsetsize)
672 #if defined(__NR_futex)
673 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
674               const struct timespec *,timeout,int *,uaddr2,int,val3)
675 #endif
676 #if defined(__NR_futex_time64)
677 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
678               const struct timespec *,timeout,int *,uaddr2,int,val3)
679 #endif
680 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
681 safe_syscall2(int, kill, pid_t, pid, int, sig)
682 safe_syscall2(int, tkill, int, tid, int, sig)
683 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
684 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
686 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
687               unsigned long, pos_l, unsigned long, pos_h)
688 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
689               unsigned long, pos_l, unsigned long, pos_h)
690 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
691               socklen_t, addrlen)
692 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
693               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
694 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
695               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
696 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
697 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
698 safe_syscall2(int, flock, int, fd, int, operation)
699 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
700 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
701               const struct timespec *, uts, size_t, sigsetsize)
702 #endif
703 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
704               int, flags)
705 #if defined(TARGET_NR_nanosleep)
706 safe_syscall2(int, nanosleep, const struct timespec *, req,
707               struct timespec *, rem)
708 #endif
709 #if defined(TARGET_NR_clock_nanosleep) || \
710     defined(TARGET_NR_clock_nanosleep_time64)
711 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
712               const struct timespec *, req, struct timespec *, rem)
713 #endif
714 #ifdef __NR_ipc
715 #ifdef __s390x__
716 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
717               void *, ptr)
718 #else
719 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
720               void *, ptr, long, fifth)
721 #endif
722 #endif
723 #ifdef __NR_msgsnd
724 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
725               int, flags)
726 #endif
727 #ifdef __NR_msgrcv
728 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
729               long, msgtype, int, flags)
730 #endif
731 #ifdef __NR_semtimedop
732 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
733               unsigned, nsops, const struct timespec *, timeout)
734 #endif
735 #if defined(TARGET_NR_mq_timedsend) || \
736     defined(TARGET_NR_mq_timedsend_time64)
737 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
738               size_t, len, unsigned, prio, const struct timespec *, timeout)
739 #endif
740 #if defined(TARGET_NR_mq_timedreceive) || \
741     defined(TARGET_NR_mq_timedreceive_time64)
742 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
743               size_t, len, unsigned *, prio, const struct timespec *, timeout)
744 #endif
745 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
746 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
747               int, outfd, loff_t *, poutoff, size_t, length,
748               unsigned int, flags)
749 #endif
750 
751 /* We do ioctl like this rather than via safe_syscall3 to preserve the
752  * "third argument might be integer or pointer or not present" behaviour of
753  * the libc function.
754  */
755 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
756 /* Similarly for fcntl. Note that callers must always:
757  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
758  *  use the flock64 struct rather than unsuffixed flock
759  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
760  */
761 #ifdef __NR_fcntl64
762 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
763 #else
764 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
765 #endif
766 
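/* Convert a host socket type value, including any SOCK_CLOEXEC and
 * SOCK_NONBLOCK modifier bits, into the target's encoding.
 */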
767 static inline int host_to_target_sock_type(int host_type)
768 {
769     int target_type;
770 
771     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
772     case SOCK_DGRAM:
773         target_type = TARGET_SOCK_DGRAM;
774         break;
775     case SOCK_STREAM:
776         target_type = TARGET_SOCK_STREAM;
777         break;
778     default:
779         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
780         break;
781     }
782 
783 #if defined(SOCK_CLOEXEC)
784     if (host_type & SOCK_CLOEXEC) {
785         target_type |= TARGET_SOCK_CLOEXEC;
786     }
787 #endif
788 
789 #if defined(SOCK_NONBLOCK)
790     if (host_type & SOCK_NONBLOCK) {
791         target_type |= TARGET_SOCK_NONBLOCK;
792     }
793 #endif
794 
795     return target_type;
796 }
797 
798 static abi_ulong target_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_brk = new_brk;
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 /* do_brk() must return target values and target errnos. */
808 abi_long do_brk(abi_ulong brk_val)
809 {
810     abi_long mapped_addr;
811     abi_ulong new_alloc_size;
812     abi_ulong new_brk, new_host_brk_page;
813 
814     /* brk pointers are always untagged */
815 
816     /* return old brk value if brk_val unchanged or zero */
817     if (!brk_val || brk_val == target_brk) {
818         return target_brk;
819     }
820 
821     new_brk = TARGET_PAGE_ALIGN(brk_val);
822     new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
823 
824     /* brk_val and old target_brk might be on the same page */
825     if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
826         if (brk_val > target_brk) {
827             /* empty remaining bytes in (possibly larger) host page */
828             memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
829         }
830         target_brk = brk_val;
831         return target_brk;
832     }
833 
834     /* Release heap if necessary */
835     if (new_brk < target_brk) {
836         /* empty remaining bytes in (possibly larger) host page */
837         memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
838 
839         /* free unused host pages and set new brk_page */
840         target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
841         brk_page = new_host_brk_page;
842 
843         target_brk = brk_val;
844         return target_brk;
845     }
846 
847     /* We need to allocate more memory after the brk... Note that
848      * we don't use MAP_FIXED because that will map over the top of
849      * any existing mapping (like the one with the host libc or qemu
850      * itself); instead we treat "mapped but at wrong address" as
851      * a failure and unmap again.
852      */
853     new_alloc_size = new_host_brk_page - brk_page;
854     if (new_alloc_size) {
855         mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
856                                         PROT_READ|PROT_WRITE,
857                                         MAP_ANON|MAP_PRIVATE, 0, 0));
858     } else {
859         mapped_addr = brk_page;
860     }
861 
862     if (mapped_addr == brk_page) {
863         /* Heap contents are initialized to zero, as for anonymous
864          * mapped pages.  Technically the new pages are already
865          * initialized to zero since they *are* anonymous mapped
866          * pages, however we have to take care with the contents that
867          * come from the remaining part of the previous page: it may
868          * contain garbage data due to previous heap usage (grown
869          * then shrunk).  */
870         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
871 
872         target_brk = brk_val;
873         brk_page = new_host_brk_page;
874         return target_brk;
875     } else if (mapped_addr != -1) {
876         /* Mapped but at wrong address, meaning there wasn't actually
877          * enough space for this brk.
878          */
879         target_munmap(mapped_addr, new_alloc_size);
880         mapped_addr = -1;
881     }
882 
883 #if defined(TARGET_ALPHA)
884     /* We (partially) emulate OSF/1 on Alpha, which requires we
885        return a proper errno, not an unchanged brk value.  */
886     return -TARGET_ENOMEM;
887 #endif
888     /* For everything else, return the previous break. */
889     return target_brk;
890 }
891 
892 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
893     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
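/* Helpers to copy fd_set bitmaps between the guest representation
 * (an array of abi_ulong words at a guest address) and the host fd_set,
 * for the first n descriptors. They return 0 or -TARGET_EFAULT.
 */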
894 static inline abi_long copy_from_user_fdset(fd_set *fds,
895                                             abi_ulong target_fds_addr,
896                                             int n)
897 {
898     int i, nw, j, k;
899     abi_ulong b, *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_READ,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  1)))
906         return -TARGET_EFAULT;
907 
908     FD_ZERO(fds);
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         /* grab the abi_ulong */
912         __get_user(b, &target_fds[i]);
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             /* check the bit inside the abi_ulong */
915             if ((b >> j) & 1)
916                 FD_SET(k, fds);
917             k++;
918         }
919     }
920 
921     unlock_user(target_fds, target_fds_addr, 0);
922 
923     return 0;
924 }
925 
926 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
927                                                  abi_ulong target_fds_addr,
928                                                  int n)
929 {
930     if (target_fds_addr) {
931         if (copy_from_user_fdset(fds, target_fds_addr, n))
932             return -TARGET_EFAULT;
933         *fds_ptr = fds;
934     } else {
935         *fds_ptr = NULL;
936     }
937     return 0;
938 }
939 
940 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
941                                           const fd_set *fds,
942                                           int n)
943 {
944     int i, nw, j, k;
945     abi_long v;
946     abi_ulong *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_WRITE,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  0)))
953         return -TARGET_EFAULT;
954 
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         v = 0;
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
960             k++;
961         }
962         __put_user(v, &target_fds[i]);
963     }
964 
965     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
966 
967     return 0;
968 }
969 #endif
970 
971 #if defined(__alpha__)
972 #define HOST_HZ 1024
973 #else
974 #define HOST_HZ 100
975 #endif
976 
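/* Convert a clock_t tick count from the host HZ to the target HZ. */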
977 static inline abi_long host_to_target_clock_t(long ticks)
978 {
979 #if HOST_HZ == TARGET_HZ
980     return ticks;
981 #else
982     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
983 #endif
984 }
985 
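/* Copy a host struct rusage out to the guest's struct target_rusage,
 * byte-swapping each field as needed. Returns 0 or -TARGET_EFAULT.
 */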
986 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
987                                              const struct rusage *rusage)
988 {
989     struct target_rusage *target_rusage;
990 
991     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
992         return -TARGET_EFAULT;
993     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
994     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
995     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
996     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
997     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
998     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
999     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1000     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1001     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1002     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1003     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1004     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1005     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1006     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1007     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1008     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1009     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1010     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1011     unlock_user_struct(target_rusage, target_addr, 1);
1012 
1013     return 0;
1014 }
1015 
1016 #ifdef TARGET_NR_setrlimit
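/* Convert a guest rlim_t to the host representation; TARGET_RLIM_INFINITY
 * and values that do not fit in the host rlim_t map to RLIM_INFINITY.
 */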
1017 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1018 {
1019     abi_ulong target_rlim_swap;
1020     rlim_t result;
1021 
1022     target_rlim_swap = tswapal(target_rlim);
1023     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1024         return RLIM_INFINITY;
1025 
1026     result = target_rlim_swap;
1027     if (target_rlim_swap != (rlim_t)result)
1028         return RLIM_INFINITY;
1029 
1030     return result;
1031 }
1032 #endif
1033 
1034 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1035 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1036 {
1037     abi_ulong target_rlim_swap;
1038     abi_ulong result;
1039 
1040     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1041         target_rlim_swap = TARGET_RLIM_INFINITY;
1042     else
1043         target_rlim_swap = rlim;
1044     result = tswapal(target_rlim_swap);
1045 
1046     return result;
1047 }
1048 #endif
1049 
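/* Map a guest RLIMIT_* resource code to the host value; unknown codes
 * are passed through unchanged.
 */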
1050 static inline int target_to_host_resource(int code)
1051 {
1052     switch (code) {
1053     case TARGET_RLIMIT_AS:
1054         return RLIMIT_AS;
1055     case TARGET_RLIMIT_CORE:
1056         return RLIMIT_CORE;
1057     case TARGET_RLIMIT_CPU:
1058         return RLIMIT_CPU;
1059     case TARGET_RLIMIT_DATA:
1060         return RLIMIT_DATA;
1061     case TARGET_RLIMIT_FSIZE:
1062         return RLIMIT_FSIZE;
1063     case TARGET_RLIMIT_LOCKS:
1064         return RLIMIT_LOCKS;
1065     case TARGET_RLIMIT_MEMLOCK:
1066         return RLIMIT_MEMLOCK;
1067     case TARGET_RLIMIT_MSGQUEUE:
1068         return RLIMIT_MSGQUEUE;
1069     case TARGET_RLIMIT_NICE:
1070         return RLIMIT_NICE;
1071     case TARGET_RLIMIT_NOFILE:
1072         return RLIMIT_NOFILE;
1073     case TARGET_RLIMIT_NPROC:
1074         return RLIMIT_NPROC;
1075     case TARGET_RLIMIT_RSS:
1076         return RLIMIT_RSS;
1077     case TARGET_RLIMIT_RTPRIO:
1078         return RLIMIT_RTPRIO;
1079 #ifdef RLIMIT_RTTIME
1080     case TARGET_RLIMIT_RTTIME:
1081         return RLIMIT_RTTIME;
1082 #endif
1083     case TARGET_RLIMIT_SIGPENDING:
1084         return RLIMIT_SIGPENDING;
1085     case TARGET_RLIMIT_STACK:
1086         return RLIMIT_STACK;
1087     default:
1088         return code;
1089     }
1090 }
1091 
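/* The helpers below copy timeval, timezone and timespec structures
 * between guest and host layouts; each returns 0 on success or
 * -TARGET_EFAULT if the guest memory cannot be accessed.
 */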
1092 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1093                                               abi_ulong target_tv_addr)
1094 {
1095     struct target_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __get_user(tv->tv_sec, &target_tv->tv_sec);
1102     __get_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 0);
1105 
1106     return 0;
1107 }
1108 
1109 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1110                                             const struct timeval *tv)
1111 {
1112     struct target_timeval *target_tv;
1113 
1114     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1115         return -TARGET_EFAULT;
1116     }
1117 
1118     __put_user(tv->tv_sec, &target_tv->tv_sec);
1119     __put_user(tv->tv_usec, &target_tv->tv_usec);
1120 
1121     unlock_user_struct(target_tv, target_tv_addr, 1);
1122 
1123     return 0;
1124 }
1125 
1126 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1127 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1128                                                 abi_ulong target_tv_addr)
1129 {
1130     struct target__kernel_sock_timeval *target_tv;
1131 
1132     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1133         return -TARGET_EFAULT;
1134     }
1135 
1136     __get_user(tv->tv_sec, &target_tv->tv_sec);
1137     __get_user(tv->tv_usec, &target_tv->tv_usec);
1138 
1139     unlock_user_struct(target_tv, target_tv_addr, 0);
1140 
1141     return 0;
1142 }
1143 #endif
1144 
1145 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1146                                               const struct timeval *tv)
1147 {
1148     struct target__kernel_sock_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
1162 #if defined(TARGET_NR_futex) || \
1163     defined(TARGET_NR_rt_sigtimedwait) || \
1164     defined(TARGET_NR_pselect6) || \
1165     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1166     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1167     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1168     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1169     defined(TARGET_NR_timer_settime) || \
1170     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
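/* Copy a guest struct timespec at target_addr into a host struct timespec. */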
1171 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1172                                                abi_ulong target_addr)
1173 {
1174     struct target_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     unlock_user_struct(target_ts, target_addr, 0);
1182     return 0;
1183 }
1184 #endif
1185 
1186 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1187     defined(TARGET_NR_timer_settime64) || \
1188     defined(TARGET_NR_mq_timedsend_time64) || \
1189     defined(TARGET_NR_mq_timedreceive_time64) || \
1190     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1191     defined(TARGET_NR_clock_nanosleep_time64) || \
1192     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1193     defined(TARGET_NR_utimensat) || \
1194     defined(TARGET_NR_utimensat_time64) || \
1195     defined(TARGET_NR_semtimedop_time64) || \
1196     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1197 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1198                                                  abi_ulong target_addr)
1199 {
1200     struct target__kernel_timespec *target_ts;
1201 
1202     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1203         return -TARGET_EFAULT;
1204     }
1205     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1206     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1207     /* in 32bit mode, this drops the padding */
1208     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1209     unlock_user_struct(target_ts, target_addr, 0);
1210     return 0;
1211 }
1212 #endif
1213 
1214 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1215                                                struct timespec *host_ts)
1216 {
1217     struct target_timespec *target_ts;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1220         return -TARGET_EFAULT;
1221     }
1222     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1223     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1224     unlock_user_struct(target_ts, target_addr, 1);
1225     return 0;
1226 }
1227 
1228 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1229                                                  struct timespec *host_ts)
1230 {
1231     struct target__kernel_timespec *target_ts;
1232 
1233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1234         return -TARGET_EFAULT;
1235     }
1236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1238     unlock_user_struct(target_ts, target_addr, 1);
1239     return 0;
1240 }
1241 
1242 #if defined(TARGET_NR_gettimeofday)
1243 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1244                                              struct timezone *tz)
1245 {
1246     struct target_timezone *target_tz;
1247 
1248     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1249         return -TARGET_EFAULT;
1250     }
1251 
1252     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1253     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1254 
1255     unlock_user_struct(target_tz, target_tz_addr, 1);
1256 
1257     return 0;
1258 }
1259 #endif
1260 
1261 #if defined(TARGET_NR_settimeofday)
1262 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1263                                                abi_ulong target_tz_addr)
1264 {
1265     struct target_timezone *target_tz;
1266 
1267     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1268         return -TARGET_EFAULT;
1269     }
1270 
1271     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1272     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1273 
1274     unlock_user_struct(target_tz, target_tz_addr, 0);
1275 
1276     return 0;
1277 }
1278 #endif
1279 
1280 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1281 #include <mqueue.h>
1282 
1283 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1284                                               abi_ulong target_mq_attr_addr)
1285 {
1286     struct target_mq_attr *target_mq_attr;
1287 
1288     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1289                           target_mq_attr_addr, 1))
1290         return -TARGET_EFAULT;
1291 
1292     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1293     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1294     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1295     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1296 
1297     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1298 
1299     return 0;
1300 }
1301 
1302 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1303                                             const struct mq_attr *attr)
1304 {
1305     struct target_mq_attr *target_mq_attr;
1306 
1307     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1308                           target_mq_attr_addr, 0))
1309         return -TARGET_EFAULT;
1310 
1311     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1312     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1313     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1314     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1315 
1316     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1317 
1318     return 0;
1319 }
1320 #endif
1321 
1322 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1323 /* do_select() must return target values and target errnos. */
1324 static abi_long do_select(int n,
1325                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1326                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1327 {
1328     fd_set rfds, wfds, efds;
1329     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1330     struct timeval tv;
1331     struct timespec ts, *ts_ptr;
1332     abi_long ret;
1333 
1334     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1335     if (ret) {
1336         return ret;
1337     }
1338     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346 
1347     if (target_tv_addr) {
1348         if (copy_from_user_timeval(&tv, target_tv_addr))
1349             return -TARGET_EFAULT;
1350         ts.tv_sec = tv.tv_sec;
1351         ts.tv_nsec = tv.tv_usec * 1000;
1352         ts_ptr = &ts;
1353     } else {
1354         ts_ptr = NULL;
1355     }
1356 
1357     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1358                                   ts_ptr, NULL));
1359 
1360     if (!is_error(ret)) {
1361         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1362             return -TARGET_EFAULT;
1363         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1364             return -TARGET_EFAULT;
1365         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1366             return -TARGET_EFAULT;
1367 
1368         if (target_tv_addr) {
1369             tv.tv_sec = ts.tv_sec;
1370             tv.tv_usec = ts.tv_nsec / 1000;
1371             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1372                 return -TARGET_EFAULT;
1373             }
1374         }
1375     }
1376 
1377     return ret;
1378 }
1379 
1380 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1381 static abi_long do_old_select(abi_ulong arg1)
1382 {
1383     struct target_sel_arg_struct *sel;
1384     abi_ulong inp, outp, exp, tvp;
1385     long nsel;
1386 
1387     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1388         return -TARGET_EFAULT;
1389     }
1390 
1391     nsel = tswapal(sel->n);
1392     inp = tswapal(sel->inp);
1393     outp = tswapal(sel->outp);
1394     exp = tswapal(sel->exp);
1395     tvp = tswapal(sel->tvp);
1396 
1397     unlock_user_struct(sel, arg1, 0);
1398 
1399     return do_select(nsel, inp, outp, exp, tvp);
1400 }
1401 #endif
1402 #endif
1403 
1404 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
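/* do_pselect6() must return target values and target errnos. It unpacks
 * the fd sets, the timeout (time64 selects the 64-bit timespec layout)
 * and the (sigset, sigsetsize) pair packed into arg6, then calls the
 * host pselect6.
 */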
1405 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1406                             abi_long arg4, abi_long arg5, abi_long arg6,
1407                             bool time64)
1408 {
1409     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1410     fd_set rfds, wfds, efds;
1411     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1412     struct timespec ts, *ts_ptr;
1413     abi_long ret;
1414 
1415     /*
1416      * The 6th arg is actually two args smashed together,
1417      * so we cannot use the C library.
1418      */
1419     struct {
1420         sigset_t *set;
1421         size_t size;
1422     } sig, *sig_ptr;
1423 
1424     abi_ulong arg_sigset, arg_sigsize, *arg7;
1425 
1426     n = arg1;
1427     rfd_addr = arg2;
1428     wfd_addr = arg3;
1429     efd_addr = arg4;
1430     ts_addr = arg5;
1431 
1432     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1433     if (ret) {
1434         return ret;
1435     }
1436     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444 
1445     /*
1446      * This takes a timespec, and not a timeval, so we cannot
1447      * use the do_select() helper ...
1448      */
1449     if (ts_addr) {
1450         if (time64) {
1451             if (target_to_host_timespec64(&ts, ts_addr)) {
1452                 return -TARGET_EFAULT;
1453             }
1454         } else {
1455             if (target_to_host_timespec(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         }
1459         ts_ptr = &ts;
1460     } else {
1461         ts_ptr = NULL;
1462     }
1463 
1464     /* Extract the two packed args for the sigset */
1465     sig_ptr = NULL;
1466     if (arg6) {
1467         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1468         if (!arg7) {
1469             return -TARGET_EFAULT;
1470         }
1471         arg_sigset = tswapal(arg7[0]);
1472         arg_sigsize = tswapal(arg7[1]);
1473         unlock_user(arg7, arg6, 0);
1474 
1475         if (arg_sigset) {
1476             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1477             if (ret != 0) {
1478                 return ret;
1479             }
1480             sig_ptr = &sig;
1481             sig.size = SIGSET_T_SIZE;
1482         }
1483     }
1484 
1485     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1486                                   ts_ptr, sig_ptr));
1487 
1488     if (sig_ptr) {
1489         finish_sigsuspend_mask(ret);
1490     }
1491 
1492     if (!is_error(ret)) {
1493         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1494             return -TARGET_EFAULT;
1495         }
1496         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1497             return -TARGET_EFAULT;
1498         }
1499         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1500             return -TARGET_EFAULT;
1501         }
1502         if (time64) {
1503             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1504                 return -TARGET_EFAULT;
1505             }
1506         } else {
1507             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         }
1511     }
1512     return ret;
1513 }
1514 #endif
1515 
1516 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1517     defined(TARGET_NR_ppoll_time64)
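/* do_ppoll() must return target values and target errnos. It implements
 * poll, ppoll and ppoll_time64: the guest pollfd array, optional timeout
 * and signal mask are converted, the host ppoll is called, and revents
 * are written back to the guest.
 */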
1518 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1519                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1520 {
1521     struct target_pollfd *target_pfd;
1522     unsigned int nfds = arg2;
1523     struct pollfd *pfd;
1524     unsigned int i;
1525     abi_long ret;
1526 
1527     pfd = NULL;
1528     target_pfd = NULL;
1529     if (nfds) {
1530         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1531             return -TARGET_EINVAL;
1532         }
1533         target_pfd = lock_user(VERIFY_WRITE, arg1,
1534                                sizeof(struct target_pollfd) * nfds, 1);
1535         if (!target_pfd) {
1536             return -TARGET_EFAULT;
1537         }
1538 
1539         pfd = alloca(sizeof(struct pollfd) * nfds);
1540         for (i = 0; i < nfds; i++) {
1541             pfd[i].fd = tswap32(target_pfd[i].fd);
1542             pfd[i].events = tswap16(target_pfd[i].events);
1543         }
1544     }
1545     if (ppoll) {
1546         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1547         sigset_t *set = NULL;
1548 
1549         if (arg3) {
1550             if (time64) {
1551                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1552                     unlock_user(target_pfd, arg1, 0);
1553                     return -TARGET_EFAULT;
1554                 }
1555             } else {
1556                 if (target_to_host_timespec(timeout_ts, arg3)) {
1557                     unlock_user(target_pfd, arg1, 0);
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         } else {
1562             timeout_ts = NULL;
1563         }
1564 
1565         if (arg4) {
1566             ret = process_sigsuspend_mask(&set, arg4, arg5);
1567             if (ret != 0) {
1568                 unlock_user(target_pfd, arg1, 0);
1569                 return ret;
1570             }
1571         }
1572 
1573         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1574                                    set, SIGSET_T_SIZE));
1575 
1576         if (set) {
1577             finish_sigsuspend_mask(ret);
1578         }
1579         if (!is_error(ret) && arg3) {
1580             if (time64) {
1581                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1582                     return -TARGET_EFAULT;
1583                 }
1584             } else {
1585                 if (host_to_target_timespec(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             }
1589         }
1590     } else {
1591           struct timespec ts, *pts;
1592 
1593           if (arg3 >= 0) {
1594               /* Convert ms to secs, ns */
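                   /* e.g. arg3 == 1500 ms becomes ts = { .tv_sec = 1, .tv_nsec = 500000000 } */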
1595               ts.tv_sec = arg3 / 1000;
1596               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1597               pts = &ts;
1598           } else {
1599               /* A negative poll() timeout means "infinite" */
1600               pts = NULL;
1601           }
1602           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1603     }
1604 
1605     if (!is_error(ret)) {
1606         for (i = 0; i < nfds; i++) {
1607             target_pfd[i].revents = tswap16(pfd[i].revents);
1608         }
1609     }
1610     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1611     return ret;
1612 }
1613 #endif
1614 
1615 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1616                         int flags, int is_pipe2)
1617 {
1618     int host_pipe[2];
1619     abi_long ret;
1620     ret = pipe2(host_pipe, flags);
1621 
1622     if (is_error(ret))
1623         return get_errno(ret);
1624 
1625     /* Several targets have special calling conventions for the original
1626        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
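     /*
      * For example, on Alpha the second descriptor comes back in register
      * a4 while the syscall return value is the first descriptor, so for
      * plain pipe() nothing is written to the guest's pipedes array at all.
      */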
1627     if (!is_pipe2) {
1628 #if defined(TARGET_ALPHA)
1629         cpu_env->ir[IR_A4] = host_pipe[1];
1630         return host_pipe[0];
1631 #elif defined(TARGET_MIPS)
1632         cpu_env->active_tc.gpr[3] = host_pipe[1];
1633         return host_pipe[0];
1634 #elif defined(TARGET_SH4)
1635         cpu_env->gregs[1] = host_pipe[1];
1636         return host_pipe[0];
1637 #elif defined(TARGET_SPARC)
1638         cpu_env->regwptr[1] = host_pipe[1];
1639         return host_pipe[0];
1640 #endif
1641     }
1642 
1643     if (put_user_s32(host_pipe[0], pipedes)
1644         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1645         return -TARGET_EFAULT;
1646     return get_errno(ret);
1647 }
1648 
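     /*
      * Convert a guest struct ip_mreq or ip_mreqn into a host struct ip_mreqn.
      * The two layouts share their first two members; only when the guest
      * passed the larger ip_mreqn (detected via len) is imr_ifindex converted
      * as well.
      */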
1649 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1650                                               abi_ulong target_addr,
1651                                               socklen_t len)
1652 {
1653     struct target_ip_mreqn *target_smreqn;
1654 
1655     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1656     if (!target_smreqn)
1657         return -TARGET_EFAULT;
1658     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1659     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1660     if (len == sizeof(struct target_ip_mreqn))
1661         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1662     unlock_user(target_smreqn, target_addr, 0);
1663 
1664     return 0;
1665 }
1666 
1667 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1668                                                abi_ulong target_addr,
1669                                                socklen_t len)
1670 {
1671     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1672     sa_family_t sa_family;
1673     struct target_sockaddr *target_saddr;
1674 
1675     if (fd_trans_target_to_host_addr(fd)) {
1676         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1677     }
1678 
1679     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1680     if (!target_saddr)
1681         return -TARGET_EFAULT;
1682 
1683     sa_family = tswap16(target_saddr->sa_family);
1684 
1685     /* Oops. The caller might send an incomplete sun_path; sun_path
1686      * must be terminated by \0 (see the manual page), but
1687      * unfortunately it is quite common to specify sockaddr_un
1688      * length as "strlen(x->sun_path)" while it should be
1689      * "strlen(...) + 1". We'll fix that here if needed.
1690      * The Linux kernel applies the same workaround.
1691      */
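     /*
      * For example, a guest that passes len == offsetof(sockaddr_un, sun_path)
      * + strlen(path) (i.e. without the trailing NUL) gets len bumped by one
      * below, provided the byte just past the supplied length is already zero,
      * so the host sees a properly terminated path.
      */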
1692 
1693     if (sa_family == AF_UNIX) {
1694         if (len < unix_maxlen && len > 0) {
1695             char *cp = (char *)target_saddr;
1696 
1697             if (cp[len - 1] && !cp[len])
1698                 len++;
1699         }
1700         if (len > unix_maxlen)
1701             len = unix_maxlen;
1702     }
1703 
1704     memcpy(addr, target_saddr, len);
1705     addr->sa_family = sa_family;
1706     if (sa_family == AF_NETLINK) {
1707         struct sockaddr_nl *nladdr;
1708 
1709         nladdr = (struct sockaddr_nl *)addr;
1710         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1711         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1712     } else if (sa_family == AF_PACKET) {
1713         struct target_sockaddr_ll *lladdr;
1714 
1715         lladdr = (struct target_sockaddr_ll *)addr;
1716         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1717         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1718     } else if (sa_family == AF_INET6) {
1719         struct sockaddr_in6 *in6addr;
1720 
1721         in6addr = (struct sockaddr_in6 *)addr;
1722         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1723     }
1724     unlock_user(target_saddr, target_addr, 0);
1725 
1726     return 0;
1727 }
1728 
1729 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1730                                                struct sockaddr *addr,
1731                                                socklen_t len)
1732 {
1733     struct target_sockaddr *target_saddr;
1734 
1735     if (len == 0) {
1736         return 0;
1737     }
1738     assert(addr);
1739 
1740     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1741     if (!target_saddr)
1742         return -TARGET_EFAULT;
1743     memcpy(target_saddr, addr, len);
1744     if (len >= offsetof(struct target_sockaddr, sa_family) +
1745         sizeof(target_saddr->sa_family)) {
1746         target_saddr->sa_family = tswap16(addr->sa_family);
1747     }
1748     if (addr->sa_family == AF_NETLINK &&
1749         len >= sizeof(struct target_sockaddr_nl)) {
1750         struct target_sockaddr_nl *target_nl =
1751                (struct target_sockaddr_nl *)target_saddr;
1752         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1753         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1754     } else if (addr->sa_family == AF_PACKET) {
1755         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1756         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1757         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1758     } else if (addr->sa_family == AF_INET6 &&
1759                len >= sizeof(struct target_sockaddr_in6)) {
1760         struct target_sockaddr_in6 *target_in6 =
1761                (struct target_sockaddr_in6 *)target_saddr;
1762         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1763     }
1764     unlock_user(target_saddr, target_addr, len);
1765 
1766     return 0;
1767 }
1768 
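     /*
      * Convert guest ancillary data (control messages) into host form.
      * Payloads with byte-order- or layout-sensitive contents are converted
      * field by field: SCM_RIGHTS fd arrays element-wise, SCM_CREDENTIALS
      * pid/uid/gid individually, and the first 32-bit word of SOL_ALG
      * messages; anything else is copied through verbatim with an
      * "Unsupported ancillary data" warning.
      */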
1769 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1770                                            struct target_msghdr *target_msgh)
1771 {
1772     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1773     abi_long msg_controllen;
1774     abi_ulong target_cmsg_addr;
1775     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1776     socklen_t space = 0;
1777 
1778     msg_controllen = tswapal(target_msgh->msg_controllen);
1779     if (msg_controllen < sizeof (struct target_cmsghdr))
1780         goto the_end;
1781     target_cmsg_addr = tswapal(target_msgh->msg_control);
1782     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1783     target_cmsg_start = target_cmsg;
1784     if (!target_cmsg)
1785         return -TARGET_EFAULT;
1786 
1787     while (cmsg && target_cmsg) {
1788         void *data = CMSG_DATA(cmsg);
1789         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1790 
1791         int len = tswapal(target_cmsg->cmsg_len)
1792             - sizeof(struct target_cmsghdr);
1793 
1794         space += CMSG_SPACE(len);
1795         if (space > msgh->msg_controllen) {
1796             space -= CMSG_SPACE(len);
1797             /* This is a QEMU bug, since we allocated the payload
1798              * area ourselves (unlike overflow in host-to-target
1799              * conversion, which is just the guest giving us a buffer
1800              * that's too small). It can't happen for the payload types
1801              * we currently support; if it becomes an issue in future
1802              * we would need to improve our allocation strategy to
1803              * something more intelligent than "twice the size of the
1804              * target buffer we're reading from".
1805              */
1806             qemu_log_mask(LOG_UNIMP,
1807                           ("Unsupported ancillary data %d/%d: "
1808                            "unhandled msg size\n"),
1809                           tswap32(target_cmsg->cmsg_level),
1810                           tswap32(target_cmsg->cmsg_type));
1811             break;
1812         }
1813 
1814         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1815             cmsg->cmsg_level = SOL_SOCKET;
1816         } else {
1817             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1818         }
1819         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1820         cmsg->cmsg_len = CMSG_LEN(len);
1821 
1822         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1823             int *fd = (int *)data;
1824             int *target_fd = (int *)target_data;
1825             int i, numfds = len / sizeof(int);
1826 
1827             for (i = 0; i < numfds; i++) {
1828                 __get_user(fd[i], target_fd + i);
1829             }
1830         } else if (cmsg->cmsg_level == SOL_SOCKET
1831                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1832             struct ucred *cred = (struct ucred *)data;
1833             struct target_ucred *target_cred =
1834                 (struct target_ucred *)target_data;
1835 
1836             __get_user(cred->pid, &target_cred->pid);
1837             __get_user(cred->uid, &target_cred->uid);
1838             __get_user(cred->gid, &target_cred->gid);
1839         } else if (cmsg->cmsg_level == SOL_ALG) {
1840             uint32_t *dst = (uint32_t *)data;
1841 
1842             memcpy(dst, target_data, len);
1843             /* fix endianness of the first 32-bit word */
1844             if (len >= sizeof(uint32_t)) {
1845                 *dst = tswap32(*dst);
1846             }
1847         } else {
1848             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1849                           cmsg->cmsg_level, cmsg->cmsg_type);
1850             memcpy(data, target_data, len);
1851         }
1852 
1853         cmsg = CMSG_NXTHDR(msgh, cmsg);
1854         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1855                                          target_cmsg_start);
1856     }
1857     unlock_user(target_cmsg, target_cmsg_addr, 0);
1858  the_end:
1859     msgh->msg_controllen = space;
1860     return 0;
1861 }
1862 
1863 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1864                                            struct msghdr *msgh)
1865 {
1866     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1867     abi_long msg_controllen;
1868     abi_ulong target_cmsg_addr;
1869     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1870     socklen_t space = 0;
1871 
1872     msg_controllen = tswapal(target_msgh->msg_controllen);
1873     if (msg_controllen < sizeof (struct target_cmsghdr))
1874         goto the_end;
1875     target_cmsg_addr = tswapal(target_msgh->msg_control);
1876     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1877     target_cmsg_start = target_cmsg;
1878     if (!target_cmsg)
1879         return -TARGET_EFAULT;
1880 
1881     while (cmsg && target_cmsg) {
1882         void *data = CMSG_DATA(cmsg);
1883         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1884 
1885         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1886         int tgt_len, tgt_space;
1887 
1888         /* We never copy a half-header but may copy half-data;
1889          * this is Linux's behaviour in put_cmsg(). Note that
1890          * truncation here is a guest problem (which we report
1891          * to the guest via the CTRUNC bit), unlike truncation
1892          * in target_to_host_cmsg, which is a QEMU bug.
1893          */
1894         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1895             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896             break;
1897         }
1898 
1899         if (cmsg->cmsg_level == SOL_SOCKET) {
1900             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1901         } else {
1902             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1903         }
1904         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1905 
1906         /* Payload types which need a different size of payload on
1907          * the target must adjust tgt_len here.
1908          */
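             /*
              * For example, struct timeval is 16 bytes on a 64-bit host
              * (two 64-bit longs) but a 32-bit guest expects an 8-byte
              * target_timeval, so SO_TIMESTAMP payloads are resized to
              * sizeof(struct target_timeval) here.
              */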
1909         tgt_len = len;
1910         switch (cmsg->cmsg_level) {
1911         case SOL_SOCKET:
1912             switch (cmsg->cmsg_type) {
1913             case SO_TIMESTAMP:
1914                 tgt_len = sizeof(struct target_timeval);
1915                 break;
1916             default:
1917                 break;
1918             }
1919             break;
1920         default:
1921             break;
1922         }
1923 
1924         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1925             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1926             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1927         }
1928 
1929         /* We must now copy-and-convert len bytes of payload
1930          * into tgt_len bytes of destination space. Bear in mind
1931          * that in both source and destination we may be dealing
1932          * with a truncated value!
1933          */
1934         switch (cmsg->cmsg_level) {
1935         case SOL_SOCKET:
1936             switch (cmsg->cmsg_type) {
1937             case SCM_RIGHTS:
1938             {
1939                 int *fd = (int *)data;
1940                 int *target_fd = (int *)target_data;
1941                 int i, numfds = tgt_len / sizeof(int);
1942 
1943                 for (i = 0; i < numfds; i++) {
1944                     __put_user(fd[i], target_fd + i);
1945                 }
1946                 break;
1947             }
1948             case SO_TIMESTAMP:
1949             {
1950                 struct timeval *tv = (struct timeval *)data;
1951                 struct target_timeval *target_tv =
1952                     (struct target_timeval *)target_data;
1953 
1954                 if (len != sizeof(struct timeval) ||
1955                     tgt_len != sizeof(struct target_timeval)) {
1956                     goto unimplemented;
1957                 }
1958 
1959                 /* copy struct timeval to target */
1960                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1961                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1962                 break;
1963             }
1964             case SCM_CREDENTIALS:
1965             {
1966                 struct ucred *cred = (struct ucred *)data;
1967                 struct target_ucred *target_cred =
1968                     (struct target_ucred *)target_data;
1969 
1970                 __put_user(cred->pid, &target_cred->pid);
1971                 __put_user(cred->uid, &target_cred->uid);
1972                 __put_user(cred->gid, &target_cred->gid);
1973                 break;
1974             }
1975             default:
1976                 goto unimplemented;
1977             }
1978             break;
1979 
1980         case SOL_IP:
1981             switch (cmsg->cmsg_type) {
1982             case IP_TTL:
1983             {
1984                 uint32_t *v = (uint32_t *)data;
1985                 uint32_t *t_int = (uint32_t *)target_data;
1986 
1987                 if (len != sizeof(uint32_t) ||
1988                     tgt_len != sizeof(uint32_t)) {
1989                     goto unimplemented;
1990                 }
1991                 __put_user(*v, t_int);
1992                 break;
1993             }
1994             case IP_RECVERR:
1995             {
1996                 struct errhdr_t {
1997                    struct sock_extended_err ee;
1998                    struct sockaddr_in offender;
1999                 };
2000                 struct errhdr_t *errh = (struct errhdr_t *)data;
2001                 struct errhdr_t *target_errh =
2002                     (struct errhdr_t *)target_data;
2003 
2004                 if (len != sizeof(struct errhdr_t) ||
2005                     tgt_len != sizeof(struct errhdr_t)) {
2006                     goto unimplemented;
2007                 }
2008                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2009                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2010                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2011                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2012                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2013                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2014                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2015                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2016                     (void *) &errh->offender, sizeof(errh->offender));
2017                 break;
2018             }
2019             default:
2020                 goto unimplemented;
2021             }
2022             break;
2023 
2024         case SOL_IPV6:
2025             switch (cmsg->cmsg_type) {
2026             case IPV6_HOPLIMIT:
2027             {
2028                 uint32_t *v = (uint32_t *)data;
2029                 uint32_t *t_int = (uint32_t *)target_data;
2030 
2031                 if (len != sizeof(uint32_t) ||
2032                     tgt_len != sizeof(uint32_t)) {
2033                     goto unimplemented;
2034                 }
2035                 __put_user(*v, t_int);
2036                 break;
2037             }
2038             case IPV6_RECVERR:
2039             {
2040                 struct errhdr6_t {
2041                    struct sock_extended_err ee;
2042                    struct sockaddr_in6 offender;
2043                 };
2044                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2045                 struct errhdr6_t *target_errh =
2046                     (struct errhdr6_t *)target_data;
2047 
2048                 if (len != sizeof(struct errhdr6_t) ||
2049                     tgt_len != sizeof(struct errhdr6_t)) {
2050                     goto unimplemented;
2051                 }
2052                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2053                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2054                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2055                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2056                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2057                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2058                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2059                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2060                     (void *) &errh->offender, sizeof(errh->offender));
2061                 break;
2062             }
2063             default:
2064                 goto unimplemented;
2065             }
2066             break;
2067 
2068         default:
2069         unimplemented:
2070             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2071                           cmsg->cmsg_level, cmsg->cmsg_type);
2072             memcpy(target_data, data, MIN(len, tgt_len));
2073             if (tgt_len > len) {
2074                 memset(target_data + len, 0, tgt_len - len);
2075             }
2076         }
2077 
2078         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2079         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2080         if (msg_controllen < tgt_space) {
2081             tgt_space = msg_controllen;
2082         }
2083         msg_controllen -= tgt_space;
2084         space += tgt_space;
2085         cmsg = CMSG_NXTHDR(msgh, cmsg);
2086         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2087                                          target_cmsg_start);
2088     }
2089     unlock_user(target_cmsg, target_cmsg_addr, space);
2090  the_end:
2091     target_msgh->msg_controllen = tswapal(space);
2092     return 0;
2093 }
2094 
2095 /* do_setsockopt() must return target values and target errnos. */
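     /*
      * A guest call such as setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, 4)
      * arrives here with the target's TARGET_SO_* option number; the switch
      * below maps it to the host's SO_* value and re-issues the call with a
      * host-side copy of the option value.
      */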
2096 static abi_long do_setsockopt(int sockfd, int level, int optname,
2097                               abi_ulong optval_addr, socklen_t optlen)
2098 {
2099     abi_long ret;
2100     int val;
2101     struct ip_mreqn *ip_mreq;
2102     struct ip_mreq_source *ip_mreq_source;
2103 
2104     switch(level) {
2105     case SOL_TCP:
2106     case SOL_UDP:
2107         /* TCP and UDP options all take an 'int' value.  */
2108         if (optlen < sizeof(uint32_t))
2109             return -TARGET_EINVAL;
2110 
2111         if (get_user_u32(val, optval_addr))
2112             return -TARGET_EFAULT;
2113         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2114         break;
2115     case SOL_IP:
2116         switch(optname) {
2117         case IP_TOS:
2118         case IP_TTL:
2119         case IP_HDRINCL:
2120         case IP_ROUTER_ALERT:
2121         case IP_RECVOPTS:
2122         case IP_RETOPTS:
2123         case IP_PKTINFO:
2124         case IP_MTU_DISCOVER:
2125         case IP_RECVERR:
2126         case IP_RECVTTL:
2127         case IP_RECVTOS:
2128 #ifdef IP_FREEBIND
2129         case IP_FREEBIND:
2130 #endif
2131         case IP_MULTICAST_TTL:
2132         case IP_MULTICAST_LOOP:
2133             val = 0;
2134             if (optlen >= sizeof(uint32_t)) {
2135                 if (get_user_u32(val, optval_addr))
2136                     return -TARGET_EFAULT;
2137             } else if (optlen >= 1) {
2138                 if (get_user_u8(val, optval_addr))
2139                     return -TARGET_EFAULT;
2140             }
2141             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2142             break;
2143         case IP_ADD_MEMBERSHIP:
2144         case IP_DROP_MEMBERSHIP:
2145             if (optlen < sizeof (struct target_ip_mreq) ||
2146                 optlen > sizeof (struct target_ip_mreqn))
2147                 return -TARGET_EINVAL;
2148 
2149             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2150             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2151             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2152             break;
2153 
2154         case IP_BLOCK_SOURCE:
2155         case IP_UNBLOCK_SOURCE:
2156         case IP_ADD_SOURCE_MEMBERSHIP:
2157         case IP_DROP_SOURCE_MEMBERSHIP:
2158             if (optlen != sizeof (struct target_ip_mreq_source))
2159                 return -TARGET_EINVAL;
2160 
2161             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2162             if (!ip_mreq_source) {
2163                 return -TARGET_EFAULT;
2164             }
2165             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2166             unlock_user (ip_mreq_source, optval_addr, 0);
2167             break;
2168 
2169         default:
2170             goto unimplemented;
2171         }
2172         break;
2173     case SOL_IPV6:
2174         switch (optname) {
2175         case IPV6_MTU_DISCOVER:
2176         case IPV6_MTU:
2177         case IPV6_V6ONLY:
2178         case IPV6_RECVPKTINFO:
2179         case IPV6_UNICAST_HOPS:
2180         case IPV6_MULTICAST_HOPS:
2181         case IPV6_MULTICAST_LOOP:
2182         case IPV6_RECVERR:
2183         case IPV6_RECVHOPLIMIT:
2184         case IPV6_2292HOPLIMIT:
2185         case IPV6_CHECKSUM:
2186         case IPV6_ADDRFORM:
2187         case IPV6_2292PKTINFO:
2188         case IPV6_RECVTCLASS:
2189         case IPV6_RECVRTHDR:
2190         case IPV6_2292RTHDR:
2191         case IPV6_RECVHOPOPTS:
2192         case IPV6_2292HOPOPTS:
2193         case IPV6_RECVDSTOPTS:
2194         case IPV6_2292DSTOPTS:
2195         case IPV6_TCLASS:
2196         case IPV6_ADDR_PREFERENCES:
2197 #ifdef IPV6_RECVPATHMTU
2198         case IPV6_RECVPATHMTU:
2199 #endif
2200 #ifdef IPV6_TRANSPARENT
2201         case IPV6_TRANSPARENT:
2202 #endif
2203 #ifdef IPV6_FREEBIND
2204         case IPV6_FREEBIND:
2205 #endif
2206 #ifdef IPV6_RECVORIGDSTADDR
2207         case IPV6_RECVORIGDSTADDR:
2208 #endif
2209             val = 0;
2210             if (optlen < sizeof(uint32_t)) {
2211                 return -TARGET_EINVAL;
2212             }
2213             if (get_user_u32(val, optval_addr)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &val, sizeof(val)));
2218             break;
2219         case IPV6_PKTINFO:
2220         {
2221             struct in6_pktinfo pki;
2222 
2223             if (optlen < sizeof(pki)) {
2224                 return -TARGET_EINVAL;
2225             }
2226 
2227             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2228                 return -TARGET_EFAULT;
2229             }
2230 
2231             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2232 
2233             ret = get_errno(setsockopt(sockfd, level, optname,
2234                                        &pki, sizeof(pki)));
2235             break;
2236         }
2237         case IPV6_ADD_MEMBERSHIP:
2238         case IPV6_DROP_MEMBERSHIP:
2239         {
2240             struct ipv6_mreq ipv6mreq;
2241 
2242             if (optlen < sizeof(ipv6mreq)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &ipv6mreq, sizeof(ipv6mreq)));
2254             break;
2255         }
2256         default:
2257             goto unimplemented;
2258         }
2259         break;
2260     case SOL_ICMPV6:
2261         switch (optname) {
2262         case ICMPV6_FILTER:
2263         {
2264             struct icmp6_filter icmp6f;
2265 
2266             if (optlen > sizeof(icmp6f)) {
2267                 optlen = sizeof(icmp6f);
2268             }
2269 
2270             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             for (val = 0; val < 8; val++) {
2275                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2276             }
2277 
2278             ret = get_errno(setsockopt(sockfd, level, optname,
2279                                        &icmp6f, optlen));
2280             break;
2281         }
2282         default:
2283             goto unimplemented;
2284         }
2285         break;
2286     case SOL_RAW:
2287         switch (optname) {
2288         case ICMP_FILTER:
2289         case IPV6_CHECKSUM:
2290             /* these take a u32 value */
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294 
2295             if (get_user_u32(val, optval_addr)) {
2296                 return -TARGET_EFAULT;
2297             }
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &val, sizeof(val)));
2300             break;
2301 
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2307     case SOL_ALG:
2308         switch (optname) {
2309         case ALG_SET_KEY:
2310         {
2311             char *alg_key = g_malloc(optlen);
2312 
2313             if (!alg_key) {
2314                 return -TARGET_ENOMEM;
2315             }
2316             if (copy_from_user(alg_key, optval_addr, optlen)) {
2317                 g_free(alg_key);
2318                 return -TARGET_EFAULT;
2319             }
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        alg_key, optlen));
2322             g_free(alg_key);
2323             break;
2324         }
2325         case ALG_SET_AEAD_AUTHSIZE:
2326         {
2327             ret = get_errno(setsockopt(sockfd, level, optname,
2328                                        NULL, optlen));
2329             break;
2330         }
2331         default:
2332             goto unimplemented;
2333         }
2334         break;
2335 #endif
2336     case TARGET_SOL_SOCKET:
2337         switch (optname) {
2338         case TARGET_SO_RCVTIMEO:
2339         {
2340                 struct timeval tv;
2341 
2342                 optname = SO_RCVTIMEO;
2343 
2344 set_timeout:
2345                 if (optlen != sizeof(struct target_timeval)) {
2346                     return -TARGET_EINVAL;
2347                 }
2348 
2349                 if (copy_from_user_timeval(&tv, optval_addr)) {
2350                     return -TARGET_EFAULT;
2351                 }
2352 
2353                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2354                                 &tv, sizeof(tv)));
2355                 return ret;
2356         }
2357         case TARGET_SO_SNDTIMEO:
2358                 optname = SO_SNDTIMEO;
2359                 goto set_timeout;
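             /*
              * Classic BPF programs arrive as a guest sock_fprog { len, filter },
              * where each sock_filter instruction packs a 16-bit opcode, two
              * 8-bit jump offsets and a 32-bit constant; the loop below swaps
              * each field before handing the program to the host kernel.
              */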
2360         case TARGET_SO_ATTACH_FILTER:
2361         {
2362                 struct target_sock_fprog *tfprog;
2363                 struct target_sock_filter *tfilter;
2364                 struct sock_fprog fprog;
2365                 struct sock_filter *filter;
2366                 int i;
2367 
2368                 if (optlen != sizeof(*tfprog)) {
2369                     return -TARGET_EINVAL;
2370                 }
2371                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2372                     return -TARGET_EFAULT;
2373                 }
2374                 if (!lock_user_struct(VERIFY_READ, tfilter,
2375                                       tswapal(tfprog->filter), 0)) {
2376                     unlock_user_struct(tfprog, optval_addr, 1);
2377                     return -TARGET_EFAULT;
2378                 }
2379 
2380                 fprog.len = tswap16(tfprog->len);
2381                 filter = g_try_new(struct sock_filter, fprog.len);
2382                 if (filter == NULL) {
2383                     unlock_user_struct(tfilter, tfprog->filter, 1);
2384                     unlock_user_struct(tfprog, optval_addr, 1);
2385                     return -TARGET_ENOMEM;
2386                 }
2387                 for (i = 0; i < fprog.len; i++) {
2388                     filter[i].code = tswap16(tfilter[i].code);
2389                     filter[i].jt = tfilter[i].jt;
2390                     filter[i].jf = tfilter[i].jf;
2391                     filter[i].k = tswap32(tfilter[i].k);
2392                 }
2393                 fprog.filter = filter;
2394 
2395                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2396                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2397                 g_free(filter);
2398 
2399                 unlock_user_struct(tfilter, tfprog->filter, 1);
2400                 unlock_user_struct(tfprog, optval_addr, 1);
2401                 return ret;
2402         }
2403         case TARGET_SO_BINDTODEVICE:
2404         {
2405                 char *dev_ifname, *addr_ifname;
2406 
2407                 if (optlen > IFNAMSIZ - 1) {
2408                     optlen = IFNAMSIZ - 1;
2409                 }
2410                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2411                 if (!dev_ifname) {
2412                     return -TARGET_EFAULT;
2413                 }
2414                 optname = SO_BINDTODEVICE;
2415                 addr_ifname = alloca(IFNAMSIZ);
2416                 memcpy(addr_ifname, dev_ifname, optlen);
2417                 addr_ifname[optlen] = 0;
2418                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2419                                            addr_ifname, optlen));
2420                 unlock_user(dev_ifname, optval_addr, 0);
2421                 return ret;
2422         }
2423         case TARGET_SO_LINGER:
2424         {
2425                 struct linger lg;
2426                 struct target_linger *tlg;
2427 
2428                 if (optlen != sizeof(struct target_linger)) {
2429                     return -TARGET_EINVAL;
2430                 }
2431                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2432                     return -TARGET_EFAULT;
2433                 }
2434                 __get_user(lg.l_onoff, &tlg->l_onoff);
2435                 __get_user(lg.l_linger, &tlg->l_linger);
2436                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2437                                 &lg, sizeof(lg)));
2438                 unlock_user_struct(tlg, optval_addr, 0);
2439                 return ret;
2440         }
2441         /* Options with 'int' argument.  */
2442         case TARGET_SO_DEBUG:
2443                 optname = SO_DEBUG;
2444                 break;
2445         case TARGET_SO_REUSEADDR:
2446                 optname = SO_REUSEADDR;
2447                 break;
2448 #ifdef SO_REUSEPORT
2449         case TARGET_SO_REUSEPORT:
2450                 optname = SO_REUSEPORT;
2451                 break;
2452 #endif
2453         case TARGET_SO_TYPE:
2454                 optname = SO_TYPE;
2455                 break;
2456         case TARGET_SO_ERROR:
2457                 optname = SO_ERROR;
2458                 break;
2459         case TARGET_SO_DONTROUTE:
2460                 optname = SO_DONTROUTE;
2461                 break;
2462         case TARGET_SO_BROADCAST:
2463                 optname = SO_BROADCAST;
2464                 break;
2465         case TARGET_SO_SNDBUF:
2466                 optname = SO_SNDBUF;
2467                 break;
2468         case TARGET_SO_SNDBUFFORCE:
2469                 optname = SO_SNDBUFFORCE;
2470                 break;
2471         case TARGET_SO_RCVBUF:
2472                 optname = SO_RCVBUF;
2473                 break;
2474         case TARGET_SO_RCVBUFFORCE:
2475                 optname = SO_RCVBUFFORCE;
2476                 break;
2477         case TARGET_SO_KEEPALIVE:
2478                 optname = SO_KEEPALIVE;
2479                 break;
2480         case TARGET_SO_OOBINLINE:
2481                 optname = SO_OOBINLINE;
2482                 break;
2483         case TARGET_SO_NO_CHECK:
2484                 optname = SO_NO_CHECK;
2485                 break;
2486         case TARGET_SO_PRIORITY:
2487                 optname = SO_PRIORITY;
2488                 break;
2489 #ifdef SO_BSDCOMPAT
2490         case TARGET_SO_BSDCOMPAT:
2491                 optname = SO_BSDCOMPAT;
2492                 break;
2493 #endif
2494         case TARGET_SO_PASSCRED:
2495                 optname = SO_PASSCRED;
2496                 break;
2497         case TARGET_SO_PASSSEC:
2498                 optname = SO_PASSSEC;
2499                 break;
2500         case TARGET_SO_TIMESTAMP:
2501                 optname = SO_TIMESTAMP;
2502                 break;
2503         case TARGET_SO_RCVLOWAT:
2504                 optname = SO_RCVLOWAT;
2505                 break;
2506         default:
2507             goto unimplemented;
2508         }
2509         if (optlen < sizeof(uint32_t))
2510             return -TARGET_EINVAL;
2511 
2512         if (get_user_u32(val, optval_addr))
2513             return -TARGET_EFAULT;
2514         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2515         break;
2516 #ifdef SOL_NETLINK
2517     case SOL_NETLINK:
2518         switch (optname) {
2519         case NETLINK_PKTINFO:
2520         case NETLINK_ADD_MEMBERSHIP:
2521         case NETLINK_DROP_MEMBERSHIP:
2522         case NETLINK_BROADCAST_ERROR:
2523         case NETLINK_NO_ENOBUFS:
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2525         case NETLINK_LISTEN_ALL_NSID:
2526         case NETLINK_CAP_ACK:
2527 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2528 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2529         case NETLINK_EXT_ACK:
2530 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2532         case NETLINK_GET_STRICT_CHK:
2533 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2534             break;
2535         default:
2536             goto unimplemented;
2537         }
2538         val = 0;
2539         if (optlen < sizeof(uint32_t)) {
2540             return -TARGET_EINVAL;
2541         }
2542         if (get_user_u32(val, optval_addr)) {
2543             return -TARGET_EFAULT;
2544         }
2545         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2546                                    sizeof(val)));
2547         break;
2548 #endif /* SOL_NETLINK */
2549     default:
2550     unimplemented:
2551         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2552                       level, optname);
2553         ret = -TARGET_ENOPROTOOPT;
2554     }
2555     return ret;
2556 }
2557 
2558 /* do_getsockopt() must return target values and target errnos. */
2559 static abi_long do_getsockopt(int sockfd, int level, int optname,
2560                               abi_ulong optval_addr, abi_ulong optlen)
2561 {
2562     abi_long ret;
2563     int len, val;
2564     socklen_t lv;
2565 
2566     switch(level) {
2567     case TARGET_SOL_SOCKET:
2568         level = SOL_SOCKET;
2569         switch (optname) {
2570         /* These don't just return a single integer */
2571         case TARGET_SO_PEERNAME:
2572             goto unimplemented;
2573         case TARGET_SO_RCVTIMEO: {
2574             struct timeval tv;
2575             socklen_t tvlen;
2576 
2577             optname = SO_RCVTIMEO;
2578 
2579 get_timeout:
2580             if (get_user_u32(len, optlen)) {
2581                 return -TARGET_EFAULT;
2582             }
2583             if (len < 0) {
2584                 return -TARGET_EINVAL;
2585             }
2586 
2587             tvlen = sizeof(tv);
2588             ret = get_errno(getsockopt(sockfd, level, optname,
2589                                        &tv, &tvlen));
2590             if (ret < 0) {
2591                 return ret;
2592             }
2593             if (len > sizeof(struct target_timeval)) {
2594                 len = sizeof(struct target_timeval);
2595             }
2596             if (copy_to_user_timeval(optval_addr, &tv)) {
2597                 return -TARGET_EFAULT;
2598             }
2599             if (put_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             break;
2603         }
2604         case TARGET_SO_SNDTIMEO:
2605             optname = SO_SNDTIMEO;
2606             goto get_timeout;
2607         case TARGET_SO_PEERCRED: {
2608             struct ucred cr;
2609             socklen_t crlen;
2610             struct target_ucred *tcr;
2611 
2612             if (get_user_u32(len, optlen)) {
2613                 return -TARGET_EFAULT;
2614             }
2615             if (len < 0) {
2616                 return -TARGET_EINVAL;
2617             }
2618 
2619             crlen = sizeof(cr);
2620             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2621                                        &cr, &crlen));
2622             if (ret < 0) {
2623                 return ret;
2624             }
2625             if (len > crlen) {
2626                 len = crlen;
2627             }
2628             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             __put_user(cr.pid, &tcr->pid);
2632             __put_user(cr.uid, &tcr->uid);
2633             __put_user(cr.gid, &tcr->gid);
2634             unlock_user_struct(tcr, optval_addr, 1);
2635             if (put_user_u32(len, optlen)) {
2636                 return -TARGET_EFAULT;
2637             }
2638             break;
2639         }
2640         case TARGET_SO_PEERSEC: {
2641             char *name;
2642 
2643             if (get_user_u32(len, optlen)) {
2644                 return -TARGET_EFAULT;
2645             }
2646             if (len < 0) {
2647                 return -TARGET_EINVAL;
2648             }
2649             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2650             if (!name) {
2651                 return -TARGET_EFAULT;
2652             }
2653             lv = len;
2654             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2655                                        name, &lv));
2656             if (put_user_u32(lv, optlen)) {
2657                 ret = -TARGET_EFAULT;
2658             }
2659             unlock_user(name, optval_addr, lv);
2660             break;
2661         }
2662         case TARGET_SO_LINGER:
2663         {
2664             struct linger lg;
2665             socklen_t lglen;
2666             struct target_linger *tlg;
2667 
2668             if (get_user_u32(len, optlen)) {
2669                 return -TARGET_EFAULT;
2670             }
2671             if (len < 0) {
2672                 return -TARGET_EINVAL;
2673             }
2674 
2675             lglen = sizeof(lg);
2676             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2677                                        &lg, &lglen));
2678             if (ret < 0) {
2679                 return ret;
2680             }
2681             if (len > lglen) {
2682                 len = lglen;
2683             }
2684             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             __put_user(lg.l_onoff, &tlg->l_onoff);
2688             __put_user(lg.l_linger, &tlg->l_linger);
2689             unlock_user_struct(tlg, optval_addr, 1);
2690             if (put_user_u32(len, optlen)) {
2691                 return -TARGET_EFAULT;
2692             }
2693             break;
2694         }
2695         /* Options with 'int' argument.  */
2696         case TARGET_SO_DEBUG:
2697             optname = SO_DEBUG;
2698             goto int_case;
2699         case TARGET_SO_REUSEADDR:
2700             optname = SO_REUSEADDR;
2701             goto int_case;
2702 #ifdef SO_REUSEPORT
2703         case TARGET_SO_REUSEPORT:
2704             optname = SO_REUSEPORT;
2705             goto int_case;
2706 #endif
2707         case TARGET_SO_TYPE:
2708             optname = SO_TYPE;
2709             goto int_case;
2710         case TARGET_SO_ERROR:
2711             optname = SO_ERROR;
2712             goto int_case;
2713         case TARGET_SO_DONTROUTE:
2714             optname = SO_DONTROUTE;
2715             goto int_case;
2716         case TARGET_SO_BROADCAST:
2717             optname = SO_BROADCAST;
2718             goto int_case;
2719         case TARGET_SO_SNDBUF:
2720             optname = SO_SNDBUF;
2721             goto int_case;
2722         case TARGET_SO_RCVBUF:
2723             optname = SO_RCVBUF;
2724             goto int_case;
2725         case TARGET_SO_KEEPALIVE:
2726             optname = SO_KEEPALIVE;
2727             goto int_case;
2728         case TARGET_SO_OOBINLINE:
2729             optname = SO_OOBINLINE;
2730             goto int_case;
2731         case TARGET_SO_NO_CHECK:
2732             optname = SO_NO_CHECK;
2733             goto int_case;
2734         case TARGET_SO_PRIORITY:
2735             optname = SO_PRIORITY;
2736             goto int_case;
2737 #ifdef SO_BSDCOMPAT
2738         case TARGET_SO_BSDCOMPAT:
2739             optname = SO_BSDCOMPAT;
2740             goto int_case;
2741 #endif
2742         case TARGET_SO_PASSCRED:
2743             optname = SO_PASSCRED;
2744             goto int_case;
2745         case TARGET_SO_TIMESTAMP:
2746             optname = SO_TIMESTAMP;
2747             goto int_case;
2748         case TARGET_SO_RCVLOWAT:
2749             optname = SO_RCVLOWAT;
2750             goto int_case;
2751         case TARGET_SO_ACCEPTCONN:
2752             optname = SO_ACCEPTCONN;
2753             goto int_case;
2754         case TARGET_SO_PROTOCOL:
2755             optname = SO_PROTOCOL;
2756             goto int_case;
2757         case TARGET_SO_DOMAIN:
2758             optname = SO_DOMAIN;
2759             goto int_case;
2760         default:
2761             goto int_case;
2762         }
2763         break;
2764     case SOL_TCP:
2765     case SOL_UDP:
2766         /* TCP and UDP options all take an 'int' value.  */
2767     int_case:
2768         if (get_user_u32(len, optlen))
2769             return -TARGET_EFAULT;
2770         if (len < 0)
2771             return -TARGET_EINVAL;
2772         lv = sizeof(lv);
2773         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2774         if (ret < 0)
2775             return ret;
2776         switch (optname) {
2777         case SO_TYPE:
2778             val = host_to_target_sock_type(val);
2779             break;
2780         case SO_ERROR:
2781             val = host_to_target_errno(val);
2782             break;
2783         }
2784         if (len > lv)
2785             len = lv;
2786         if (len == 4) {
2787             if (put_user_u32(val, optval_addr))
2788                 return -TARGET_EFAULT;
2789         } else {
2790             if (put_user_u8(val, optval_addr))
2791                 return -TARGET_EFAULT;
2792         }
2793         if (put_user_u32(len, optlen))
2794             return -TARGET_EFAULT;
2795         break;
2796     case SOL_IP:
2797         switch(optname) {
2798         case IP_TOS:
2799         case IP_TTL:
2800         case IP_HDRINCL:
2801         case IP_ROUTER_ALERT:
2802         case IP_RECVOPTS:
2803         case IP_RETOPTS:
2804         case IP_PKTINFO:
2805         case IP_MTU_DISCOVER:
2806         case IP_RECVERR:
2807         case IP_RECVTOS:
2808 #ifdef IP_FREEBIND
2809         case IP_FREEBIND:
2810 #endif
2811         case IP_MULTICAST_TTL:
2812         case IP_MULTICAST_LOOP:
2813             if (get_user_u32(len, optlen))
2814                 return -TARGET_EFAULT;
2815             if (len < 0)
2816                 return -TARGET_EINVAL;
2817             lv = sizeof(lv);
2818             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2819             if (ret < 0)
2820                 return ret;
2821             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2822                 len = 1;
2823                 if (put_user_u32(len, optlen)
2824                     || put_user_u8(val, optval_addr))
2825                     return -TARGET_EFAULT;
2826             } else {
2827                 if (len > sizeof(int))
2828                     len = sizeof(int);
2829                 if (put_user_u32(len, optlen)
2830                     || put_user_u32(val, optval_addr))
2831                     return -TARGET_EFAULT;
2832             }
2833             break;
2834         default:
2835             ret = -TARGET_ENOPROTOOPT;
2836             break;
2837         }
2838         break;
2839     case SOL_IPV6:
2840         switch (optname) {
2841         case IPV6_MTU_DISCOVER:
2842         case IPV6_MTU:
2843         case IPV6_V6ONLY:
2844         case IPV6_RECVPKTINFO:
2845         case IPV6_UNICAST_HOPS:
2846         case IPV6_MULTICAST_HOPS:
2847         case IPV6_MULTICAST_LOOP:
2848         case IPV6_RECVERR:
2849         case IPV6_RECVHOPLIMIT:
2850         case IPV6_2292HOPLIMIT:
2851         case IPV6_CHECKSUM:
2852         case IPV6_ADDRFORM:
2853         case IPV6_2292PKTINFO:
2854         case IPV6_RECVTCLASS:
2855         case IPV6_RECVRTHDR:
2856         case IPV6_2292RTHDR:
2857         case IPV6_RECVHOPOPTS:
2858         case IPV6_2292HOPOPTS:
2859         case IPV6_RECVDSTOPTS:
2860         case IPV6_2292DSTOPTS:
2861         case IPV6_TCLASS:
2862         case IPV6_ADDR_PREFERENCES:
2863 #ifdef IPV6_RECVPATHMTU
2864         case IPV6_RECVPATHMTU:
2865 #endif
2866 #ifdef IPV6_TRANSPARENT
2867         case IPV6_TRANSPARENT:
2868 #endif
2869 #ifdef IPV6_FREEBIND
2870         case IPV6_FREEBIND:
2871 #endif
2872 #ifdef IPV6_RECVORIGDSTADDR
2873         case IPV6_RECVORIGDSTADDR:
2874 #endif
2875             if (get_user_u32(len, optlen))
2876                 return -TARGET_EFAULT;
2877             if (len < 0)
2878                 return -TARGET_EINVAL;
2879             lv = sizeof(lv);
2880             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2881             if (ret < 0)
2882                 return ret;
2883             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2884                 len = 1;
2885                 if (put_user_u32(len, optlen)
2886                     || put_user_u8(val, optval_addr))
2887                     return -TARGET_EFAULT;
2888             } else {
2889                 if (len > sizeof(int))
2890                     len = sizeof(int);
2891                 if (put_user_u32(len, optlen)
2892                     || put_user_u32(val, optval_addr))
2893                     return -TARGET_EFAULT;
2894             }
2895             break;
2896         default:
2897             ret = -TARGET_ENOPROTOOPT;
2898             break;
2899         }
2900         break;
2901 #ifdef SOL_NETLINK
2902     case SOL_NETLINK:
2903         switch (optname) {
2904         case NETLINK_PKTINFO:
2905         case NETLINK_BROADCAST_ERROR:
2906         case NETLINK_NO_ENOBUFS:
2907 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2908         case NETLINK_LISTEN_ALL_NSID:
2909         case NETLINK_CAP_ACK:
2910 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2911 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2912         case NETLINK_EXT_ACK:
2913 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2914 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2915         case NETLINK_GET_STRICT_CHK:
2916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2917             if (get_user_u32(len, optlen)) {
2918                 return -TARGET_EFAULT;
2919             }
2920             if (len != sizeof(val)) {
2921                 return -TARGET_EINVAL;
2922             }
2923             lv = len;
2924             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2925             if (ret < 0) {
2926                 return ret;
2927             }
2928             if (put_user_u32(lv, optlen)
2929                 || put_user_u32(val, optval_addr)) {
2930                 return -TARGET_EFAULT;
2931             }
2932             break;
2933 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2934         case NETLINK_LIST_MEMBERSHIPS:
2935         {
2936             uint32_t *results;
2937             int i;
2938             if (get_user_u32(len, optlen)) {
2939                 return -TARGET_EFAULT;
2940             }
2941             if (len < 0) {
2942                 return -TARGET_EINVAL;
2943             }
2944             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2945             if (!results && len > 0) {
2946                 return -TARGET_EFAULT;
2947             }
2948             lv = len;
2949             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2950             if (ret < 0) {
2951                 unlock_user(results, optval_addr, 0);
2952                 return ret;
2953             }
2954             /* Convert from host endianness to target endianness. */
2955             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2956                 results[i] = tswap32(results[i]);
2957             }
2958             if (put_user_u32(lv, optlen)) {
2959                 return -TARGET_EFAULT;
2960             }
2961             unlock_user(results, optval_addr, 0);
2962             break;
2963         }
2964 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2965         default:
2966             goto unimplemented;
2967         }
2968         break;
2969 #endif /* SOL_NETLINK */
2970     default:
2971     unimplemented:
2972         qemu_log_mask(LOG_UNIMP,
2973                       "getsockopt level=%d optname=%d not yet supported\n",
2974                       level, optname);
2975         ret = -TARGET_EOPNOTSUPP;
2976         break;
2977     }
2978     return ret;
2979 }
2980 
2981 /* Convert target low/high pair representing file offset into the host
2982  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2983  * as the kernel doesn't handle them either.
2984  */
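     /*
      * For example, a 32-bit guest passing the 64-bit offset 0x123456789
      * supplies tlow = 0x23456789 and thigh = 0x1; on a 64-bit host this
      * yields *hlow = 0x123456789 and *hhigh = 0.  The shifts are done in
      * two halves so that a shift by a full word width (undefined behaviour
      * in C) never occurs.
      */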
2985 static void target_to_host_low_high(abi_ulong tlow,
2986                                     abi_ulong thigh,
2987                                     unsigned long *hlow,
2988                                     unsigned long *hhigh)
2989 {
2990     uint64_t off = tlow |
2991         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2992         TARGET_LONG_BITS / 2;
2993 
2994     *hlow = off;
2995     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2996 }
2997 
2998 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2999                                 abi_ulong count, int copy)
3000 {
3001     struct target_iovec *target_vec;
3002     struct iovec *vec;
3003     abi_ulong total_len, max_len;
3004     int i;
3005     int err = 0;
3006     bool bad_address = false;
3007 
3008     if (count == 0) {
3009         errno = 0;
3010         return NULL;
3011     }
3012     if (count > IOV_MAX) {
3013         errno = EINVAL;
3014         return NULL;
3015     }
3016 
3017     vec = g_try_new0(struct iovec, count);
3018     if (vec == NULL) {
3019         errno = ENOMEM;
3020         return NULL;
3021     }
3022 
3023     target_vec = lock_user(VERIFY_READ, target_addr,
3024                            count * sizeof(struct target_iovec), 1);
3025     if (target_vec == NULL) {
3026         err = EFAULT;
3027         goto fail2;
3028     }
3029 
3030     /* ??? If host page size > target page size, this will result in a
3031        value larger than what we can actually support.  */
3032     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3033     total_len = 0;
3034 
3035     for (i = 0; i < count; i++) {
3036         abi_ulong base = tswapal(target_vec[i].iov_base);
3037         abi_long len = tswapal(target_vec[i].iov_len);
3038 
3039         if (len < 0) {
3040             err = EINVAL;
3041             goto fail;
3042         } else if (len == 0) {
3043             /* Zero length pointer is ignored.  */
3044             vec[i].iov_base = 0;
3045         } else {
3046             vec[i].iov_base = lock_user(type, base, len, copy);
3047             /* If the first buffer pointer is bad, this is a fault.  But
3048              * subsequent bad buffers will result in a partial write; this
3049              * is realized by filling the vector with null pointers and
3050              * zero lengths. */
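                 /*
                  * e.g. a writev() with three iovecs whose second buffer is
                  * unmapped still writes the first buffer: entries after the
                  * bad one are passed to the host with iov_len forced to 0.
                  */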
3051             if (!vec[i].iov_base) {
3052                 if (i == 0) {
3053                     err = EFAULT;
3054                     goto fail;
3055                 } else {
3056                     bad_address = true;
3057                 }
3058             }
3059             if (bad_address) {
3060                 len = 0;
3061             }
3062             if (len > max_len - total_len) {
3063                 len = max_len - total_len;
3064             }
3065         }
3066         vec[i].iov_len = len;
3067         total_len += len;
3068     }
3069 
3070     unlock_user(target_vec, target_addr, 0);
3071     return vec;
3072 
3073  fail:
3074     while (--i >= 0) {
3075         if (tswapal(target_vec[i].iov_len) > 0) {
3076             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3077         }
3078     }
3079     unlock_user(target_vec, target_addr, 0);
3080  fail2:
3081     g_free(vec);
3082     errno = err;
3083     return NULL;
3084 }
3085 
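/* Undo lock_iovec(): unlock each guest buffer, copying data back to the
 * guest when copy is set, and free the host iovec array.
 */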
3086 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3087                          abi_ulong count, int copy)
3088 {
3089     struct target_iovec *target_vec;
3090     int i;
3091 
3092     target_vec = lock_user(VERIFY_READ, target_addr,
3093                            count * sizeof(struct target_iovec), 1);
3094     if (target_vec) {
3095         for (i = 0; i < count; i++) {
3096             abi_ulong base = tswapal(target_vec[i].iov_base);
3097             abi_long len = tswapal(target_vec[i].iov_len);
3098             if (len < 0) {
3099                 break;
3100             }
3101             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3102         }
3103         unlock_user(target_vec, target_addr, 0);
3104     }
3105 
3106     g_free(vec);
3107 }
3108 
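/* Translate a target socket type, including the TARGET_SOCK_CLOEXEC and
 * TARGET_SOCK_NONBLOCK flags, into the host value in *type.  Returns a
 * target errno if a requested flag cannot be honoured on this host.
 */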
3109 static inline int target_to_host_sock_type(int *type)
3110 {
3111     int host_type = 0;
3112     int target_type = *type;
3113 
3114     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3115     case TARGET_SOCK_DGRAM:
3116         host_type = SOCK_DGRAM;
3117         break;
3118     case TARGET_SOCK_STREAM:
3119         host_type = SOCK_STREAM;
3120         break;
3121     default:
3122         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3123         break;
3124     }
3125     if (target_type & TARGET_SOCK_CLOEXEC) {
3126 #if defined(SOCK_CLOEXEC)
3127         host_type |= SOCK_CLOEXEC;
3128 #else
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     if (target_type & TARGET_SOCK_NONBLOCK) {
3133 #if defined(SOCK_NONBLOCK)
3134         host_type |= SOCK_NONBLOCK;
3135 #elif !defined(O_NONBLOCK)
3136         return -TARGET_EINVAL;
3137 #endif
3138     }
3139     *type = host_type;
3140     return 0;
3141 }
3142 
3143 /* Try to emulate socket type flags after socket creation.  */
3144 static int sock_flags_fixup(int fd, int target_type)
3145 {
3146 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3147     if (target_type & TARGET_SOCK_NONBLOCK) {
3148         int flags = fcntl(fd, F_GETFL);
3149         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3150             close(fd);
3151             return -TARGET_EINVAL;
3152         }
3153     }
3154 #endif
3155     return fd;
3156 }
3157 
3158 /* do_socket() Must return target values and target errnos. */
3159 static abi_long do_socket(int domain, int type, int protocol)
3160 {
3161     int target_type = type;
3162     int ret;
3163 
3164     ret = target_to_host_sock_type(&type);
3165     if (ret) {
3166         return ret;
3167     }
3168 
3169     if (domain == PF_NETLINK && !(
3170 #ifdef CONFIG_RTNETLINK
3171          protocol == NETLINK_ROUTE ||
3172 #endif
3173          protocol == NETLINK_KOBJECT_UEVENT ||
3174          protocol == NETLINK_AUDIT)) {
3175         return -TARGET_EPROTONOSUPPORT;
3176     }
3177 
3178     if (domain == AF_PACKET ||
3179         (domain == AF_INET && type == SOCK_PACKET)) {
3180         protocol = tswap16(protocol);
3181     }
3182 
3183     ret = get_errno(socket(domain, type, protocol));
3184     if (ret >= 0) {
3185         ret = sock_flags_fixup(ret, target_type);
3186         if (type == SOCK_PACKET) {
3187             /* Handle an obsolete case:
3188              * if the socket type is SOCK_PACKET, it is bound by name.
3189              */
3190             fd_trans_register(ret, &target_packet_trans);
3191         } else if (domain == PF_NETLINK) {
3192             switch (protocol) {
3193 #ifdef CONFIG_RTNETLINK
3194             case NETLINK_ROUTE:
3195                 fd_trans_register(ret, &target_netlink_route_trans);
3196                 break;
3197 #endif
3198             case NETLINK_KOBJECT_UEVENT:
3199                 /* nothing to do: messages are strings */
3200                 break;
3201             case NETLINK_AUDIT:
3202                 fd_trans_register(ret, &target_netlink_audit_trans);
3203                 break;
3204             default:
3205                 g_assert_not_reached();
3206             }
3207         }
3208     }
3209     return ret;
3210 }
3211 
3212 /* do_bind() Must return target values and target errnos. */
3213 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3214                         socklen_t addrlen)
3215 {
3216     void *addr;
3217     abi_long ret;
3218 
3219     if ((int)addrlen < 0) {
3220         return -TARGET_EINVAL;
3221     }
3222 
3223     addr = alloca(addrlen+1);
3224 
3225     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3226     if (ret)
3227         return ret;
3228 
3229     return get_errno(bind(sockfd, addr, addrlen));
3230 }
3231 
3232 /* do_connect() Must return target values and target errnos. */
3233 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3234                            socklen_t addrlen)
3235 {
3236     void *addr;
3237     abi_long ret;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     addr = alloca(addrlen+1);
3244 
3245     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3246     if (ret)
3247         return ret;
3248 
3249     return get_errno(safe_connect(sockfd, addr, addrlen));
3250 }
3251 
3252 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3253 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3254                                       int flags, int send)
3255 {
3256     abi_long ret, len;
3257     struct msghdr msg;
3258     abi_ulong count;
3259     struct iovec *vec;
3260     abi_ulong target_vec;
3261 
3262     if (msgp->msg_name) {
3263         msg.msg_namelen = tswap32(msgp->msg_namelen);
3264         msg.msg_name = alloca(msg.msg_namelen+1);
3265         ret = target_to_host_sockaddr(fd, msg.msg_name,
3266                                       tswapal(msgp->msg_name),
3267                                       msg.msg_namelen);
3268         if (ret == -TARGET_EFAULT) {
3269             /* For connected sockets msg_name and msg_namelen must
3270              * be ignored, so returning EFAULT immediately is wrong.
3271              * Instead, pass a bad msg_name to the host kernel, and
3272              * let it decide whether to return EFAULT or not.
3273              */
3274             msg.msg_name = (void *)-1;
3275         } else if (ret) {
3276             goto out2;
3277         }
3278     } else {
3279         msg.msg_name = NULL;
3280         msg.msg_namelen = 0;
3281     }
3282     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3283     msg.msg_control = alloca(msg.msg_controllen);
3284     memset(msg.msg_control, 0, msg.msg_controllen);
3285 
3286     msg.msg_flags = tswap32(msgp->msg_flags);
3287 
3288     count = tswapal(msgp->msg_iovlen);
3289     target_vec = tswapal(msgp->msg_iov);
3290 
3291     if (count > IOV_MAX) {
3292         /* sendmsg/recvmsg return a different errno for this condition than
3293          * readv/writev, so we must catch it here before lock_iovec() does.
3294          */
3295         ret = -TARGET_EMSGSIZE;
3296         goto out2;
3297     }
3298 
3299     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3300                      target_vec, count, send);
3301     if (vec == NULL) {
3302         ret = -host_to_target_errno(errno);
3303         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3304         if (!send || ret) {
3305             goto out2;
3306         }
3307     }
3308     msg.msg_iovlen = count;
3309     msg.msg_iov = vec;
3310 
3311     if (send) {
3312         if (fd_trans_target_to_host_data(fd)) {
3313             void *host_msg;
3314 
3315             host_msg = g_malloc(msg.msg_iov->iov_len);
3316             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3317             ret = fd_trans_target_to_host_data(fd)(host_msg,
3318                                                    msg.msg_iov->iov_len);
3319             if (ret >= 0) {
3320                 msg.msg_iov->iov_base = host_msg;
3321                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3322             }
3323             g_free(host_msg);
3324         } else {
3325             ret = target_to_host_cmsg(&msg, msgp);
3326             if (ret == 0) {
3327                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3328             }
3329         }
3330     } else {
3331         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3332         if (!is_error(ret)) {
3333             len = ret;
3334             if (fd_trans_host_to_target_data(fd)) {
3335                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3336                                                MIN(msg.msg_iov->iov_len, len));
3337             }
3338             if (!is_error(ret)) {
3339                 ret = host_to_target_cmsg(msgp, &msg);
3340             }
3341             if (!is_error(ret)) {
3342                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3343                 msgp->msg_flags = tswap32(msg.msg_flags);
3344                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3345                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3346                                     msg.msg_name, msg.msg_namelen);
3347                     if (ret) {
3348                         goto out;
3349                     }
3350                 }
3351 
3352                 ret = len;
3353             }
3354         }
3355     }
3356 
3357 out:
3358     if (vec) {
3359         unlock_iovec(vec, target_vec, count, !send);
3360     }
3361 out2:
3362     return ret;
3363 }
3364 
3365 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3366                                int flags, int send)
3367 {
3368     abi_long ret;
3369     struct target_msghdr *msgp;
3370 
3371     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3372                           msgp,
3373                           target_msg,
3374                           send ? 1 : 0)) {
3375         return -TARGET_EFAULT;
3376     }
3377     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3378     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3379     return ret;
3380 }
3381 
3382 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3383  * so it might not have this *mmsg-specific flag either.
3384  */
3385 #ifndef MSG_WAITFORONE
3386 #define MSG_WAITFORONE 0x10000
3387 #endif
3388 
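/* do_sendrecvmmsg() handles sendmmsg/recvmmsg by looping over the message
 * vector with do_sendrecvmsg_locked().  It returns the number of messages
 * processed if any succeeded, otherwise the error from the first attempt.
 */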
3389 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3390                                 unsigned int vlen, unsigned int flags,
3391                                 int send)
3392 {
3393     struct target_mmsghdr *mmsgp;
3394     abi_long ret = 0;
3395     int i;
3396 
3397     if (vlen > UIO_MAXIOV) {
3398         vlen = UIO_MAXIOV;
3399     }
3400 
3401     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3402     if (!mmsgp) {
3403         return -TARGET_EFAULT;
3404     }
3405 
3406     for (i = 0; i < vlen; i++) {
3407         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3408         if (is_error(ret)) {
3409             break;
3410         }
3411         mmsgp[i].msg_len = tswap32(ret);
3412         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3413         if (flags & MSG_WAITFORONE) {
3414             flags |= MSG_DONTWAIT;
3415         }
3416     }
3417 
3418     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3419 
3420     /* Return number of datagrams sent if we sent any at all;
3421      * otherwise return the error.
3422      */
3423     if (i) {
3424         return i;
3425     }
3426     return ret;
3427 }
3428 
3429 /* do_accept4() Must return target values and target errnos. */
3430 static abi_long do_accept4(int fd, abi_ulong target_addr,
3431                            abi_ulong target_addrlen_addr, int flags)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436     int host_flags;
3437 
3438     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3439 
3440     if (target_addr == 0) {
3441         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3442     }
3443 
3444     /* linux returns EFAULT if addrlen pointer is invalid */
3445     if (get_user_u32(addrlen, target_addrlen_addr))
3446         return -TARGET_EFAULT;
3447 
3448     if ((int)addrlen < 0) {
3449         return -TARGET_EINVAL;
3450     }
3451 
3452     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3453         return -TARGET_EFAULT;
3454     }
3455 
3456     addr = alloca(addrlen);
3457 
3458     ret_addrlen = addrlen;
3459     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3460     if (!is_error(ret)) {
3461         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3462         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3463             ret = -TARGET_EFAULT;
3464         }
3465     }
3466     return ret;
3467 }
3468 
3469 /* do_getpeername() Must return target values and target errnos. */
3470 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3471                                abi_ulong target_addrlen_addr)
3472 {
3473     socklen_t addrlen, ret_addrlen;
3474     void *addr;
3475     abi_long ret;
3476 
3477     if (get_user_u32(addrlen, target_addrlen_addr))
3478         return -TARGET_EFAULT;
3479 
3480     if ((int)addrlen < 0) {
3481         return -TARGET_EINVAL;
3482     }
3483 
3484     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3485         return -TARGET_EFAULT;
3486     }
3487 
3488     addr = alloca(addrlen);
3489 
3490     ret_addrlen = addrlen;
3491     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3492     if (!is_error(ret)) {
3493         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3494         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3495             ret = -TARGET_EFAULT;
3496         }
3497     }
3498     return ret;
3499 }
3500 
3501 /* do_getsockname() Must return target values and target errnos. */
3502 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3503                                abi_ulong target_addrlen_addr)
3504 {
3505     socklen_t addrlen, ret_addrlen;
3506     void *addr;
3507     abi_long ret;
3508 
3509     if (get_user_u32(addrlen, target_addrlen_addr))
3510         return -TARGET_EFAULT;
3511 
3512     if ((int)addrlen < 0) {
3513         return -TARGET_EINVAL;
3514     }
3515 
3516     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3517         return -TARGET_EFAULT;
3518     }
3519 
3520     addr = alloca(addrlen);
3521 
3522     ret_addrlen = addrlen;
3523     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3524     if (!is_error(ret)) {
3525         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3526         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3527             ret = -TARGET_EFAULT;
3528         }
3529     }
3530     return ret;
3531 }
3532 
3533 /* do_socketpair() Must return target values and target errnos. */
3534 static abi_long do_socketpair(int domain, int type, int protocol,
3535                               abi_ulong target_tab_addr)
3536 {
3537     int tab[2];
3538     abi_long ret;
3539 
3540     target_to_host_sock_type(&type);
3541 
3542     ret = get_errno(socketpair(domain, type, protocol, tab));
3543     if (!is_error(ret)) {
3544         if (put_user_s32(tab[0], target_tab_addr)
3545             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3546             ret = -TARGET_EFAULT;
3547     }
3548     return ret;
3549 }
3550 
3551 /* do_sendto() Must return target values and target errnos. */
3552 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3553                           abi_ulong target_addr, socklen_t addrlen)
3554 {
3555     void *addr;
3556     void *host_msg;
3557     void *copy_msg = NULL;
3558     abi_long ret;
3559 
3560     if ((int)addrlen < 0) {
3561         return -TARGET_EINVAL;
3562     }
3563 
3564     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3565     if (!host_msg)
3566         return -TARGET_EFAULT;
3567     if (fd_trans_target_to_host_data(fd)) {
3568         copy_msg = host_msg;
3569         host_msg = g_malloc(len);
3570         memcpy(host_msg, copy_msg, len);
3571         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3572         if (ret < 0) {
3573             goto fail;
3574         }
3575     }
3576     if (target_addr) {
3577         addr = alloca(addrlen+1);
3578         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3579         if (ret) {
3580             goto fail;
3581         }
3582         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3583     } else {
3584         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3585     }
3586 fail:
3587     if (copy_msg) {
3588         g_free(host_msg);
3589         host_msg = copy_msg;
3590     }
3591     unlock_user(host_msg, msg, 0);
3592     return ret;
3593 }
3594 
3595 /* do_recvfrom() Must return target values and target errnos. */
3596 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3597                             abi_ulong target_addr,
3598                             abi_ulong target_addrlen)
3599 {
3600     socklen_t addrlen, ret_addrlen;
3601     void *addr;
3602     void *host_msg;
3603     abi_long ret;
3604 
3605     if (!msg) {
3606         host_msg = NULL;
3607     } else {
3608         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3609         if (!host_msg) {
3610             return -TARGET_EFAULT;
3611         }
3612     }
3613     if (target_addr) {
3614         if (get_user_u32(addrlen, target_addrlen)) {
3615             ret = -TARGET_EFAULT;
3616             goto fail;
3617         }
3618         if ((int)addrlen < 0) {
3619             ret = -TARGET_EINVAL;
3620             goto fail;
3621         }
3622         addr = alloca(addrlen);
3623         ret_addrlen = addrlen;
3624         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3625                                       addr, &ret_addrlen));
3626     } else {
3627         addr = NULL; /* To keep compiler quiet.  */
3628         addrlen = 0; /* To keep compiler quiet.  */
3629         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3630     }
3631     if (!is_error(ret)) {
3632         if (fd_trans_host_to_target_data(fd)) {
3633             abi_long trans;
3634             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3635             if (is_error(trans)) {
3636                 ret = trans;
3637                 goto fail;
3638             }
3639         }
3640         if (target_addr) {
3641             host_to_target_sockaddr(target_addr, addr,
3642                                     MIN(addrlen, ret_addrlen));
3643             if (put_user_u32(ret_addrlen, target_addrlen)) {
3644                 ret = -TARGET_EFAULT;
3645                 goto fail;
3646             }
3647         }
3648         unlock_user(host_msg, msg, len);
3649     } else {
3650 fail:
3651         unlock_user(host_msg, msg, 0);
3652     }
3653     return ret;
3654 }
3655 
3656 #ifdef TARGET_NR_socketcall
3657 /* do_socketcall() must return target values and target errnos. */
3658 static abi_long do_socketcall(int num, abi_ulong vptr)
3659 {
3660     static const unsigned nargs[] = { /* number of arguments per operation */
3661         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3662         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3663         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3664         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3665         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3666         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3667         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3668         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3669         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3670         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3671         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3672         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3673         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3674         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3675         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3676         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3677         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3678         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3679         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3680         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3681     };
3682     abi_long a[6]; /* max 6 args */
3683     unsigned i;
3684 
3685     /* check the range of the first argument num */
3686     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3687     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3688         return -TARGET_EINVAL;
3689     }
3690     /* ensure we have space for args */
3691     if (nargs[num] > ARRAY_SIZE(a)) {
3692         return -TARGET_EINVAL;
3693     }
3694     /* collect the arguments in a[] according to nargs[] */
3695     for (i = 0; i < nargs[num]; ++i) {
3696         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3697             return -TARGET_EFAULT;
3698         }
3699     }
3700     /* now when we have the args, invoke the appropriate underlying function */
3701     switch (num) {
3702     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3703         return do_socket(a[0], a[1], a[2]);
3704     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3705         return do_bind(a[0], a[1], a[2]);
3706     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3707         return do_connect(a[0], a[1], a[2]);
3708     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3709         return get_errno(listen(a[0], a[1]));
3710     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3711         return do_accept4(a[0], a[1], a[2], 0);
3712     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3713         return do_getsockname(a[0], a[1], a[2]);
3714     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3715         return do_getpeername(a[0], a[1], a[2]);
3716     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3717         return do_socketpair(a[0], a[1], a[2], a[3]);
3718     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3719         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3720     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3721         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3722     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3723         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3724     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3725         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3726     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3727         return get_errno(shutdown(a[0], a[1]));
3728     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3729         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3730     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3731         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3732     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3733         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3734     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3735         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3736     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3737         return do_accept4(a[0], a[1], a[2], a[3]);
3738     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3739         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3740     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3741         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3742     default:
3743         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3744         return -TARGET_EINVAL;
3745     }
3746 }
3747 #endif
3748 
3749 #define N_SHM_REGIONS	32
3750 
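/* Track guest shmat() mappings so that do_shmdt() can look up the segment
 * size and clear the corresponding guest page flags on detach.
 */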
3751 static struct shm_region {
3752     abi_ulong start;
3753     abi_ulong size;
3754     bool in_use;
3755 } shm_regions[N_SHM_REGIONS];
3756 
3757 #ifndef TARGET_SEMID64_DS
3758 /* asm-generic version of this struct */
3759 struct target_semid64_ds
3760 {
3761   struct target_ipc_perm sem_perm;
3762   abi_ulong sem_otime;
3763 #if TARGET_ABI_BITS == 32
3764   abi_ulong __unused1;
3765 #endif
3766   abi_ulong sem_ctime;
3767 #if TARGET_ABI_BITS == 32
3768   abi_ulong __unused2;
3769 #endif
3770   abi_ulong sem_nsems;
3771   abi_ulong __unused3;
3772   abi_ulong __unused4;
3773 };
3774 #endif
3775 
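/* Copy the ipc_perm embedded in the guest semid64_ds at target_addr into
 * host form, byte-swapping each field as needed.
 */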
3776 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3777                                                abi_ulong target_addr)
3778 {
3779     struct target_ipc_perm *target_ip;
3780     struct target_semid64_ds *target_sd;
3781 
3782     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3783         return -TARGET_EFAULT;
3784     target_ip = &(target_sd->sem_perm);
3785     host_ip->__key = tswap32(target_ip->__key);
3786     host_ip->uid = tswap32(target_ip->uid);
3787     host_ip->gid = tswap32(target_ip->gid);
3788     host_ip->cuid = tswap32(target_ip->cuid);
3789     host_ip->cgid = tswap32(target_ip->cgid);
3790 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3791     host_ip->mode = tswap32(target_ip->mode);
3792 #else
3793     host_ip->mode = tswap16(target_ip->mode);
3794 #endif
3795 #if defined(TARGET_PPC)
3796     host_ip->__seq = tswap32(target_ip->__seq);
3797 #else
3798     host_ip->__seq = tswap16(target_ip->__seq);
3799 #endif
3800     unlock_user_struct(target_sd, target_addr, 0);
3801     return 0;
3802 }
3803 
3804 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3805                                                struct ipc_perm *host_ip)
3806 {
3807     struct target_ipc_perm *target_ip;
3808     struct target_semid64_ds *target_sd;
3809 
3810     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3811         return -TARGET_EFAULT;
3812     target_ip = &(target_sd->sem_perm);
3813     target_ip->__key = tswap32(host_ip->__key);
3814     target_ip->uid = tswap32(host_ip->uid);
3815     target_ip->gid = tswap32(host_ip->gid);
3816     target_ip->cuid = tswap32(host_ip->cuid);
3817     target_ip->cgid = tswap32(host_ip->cgid);
3818 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3819     target_ip->mode = tswap32(host_ip->mode);
3820 #else
3821     target_ip->mode = tswap16(host_ip->mode);
3822 #endif
3823 #if defined(TARGET_PPC)
3824     target_ip->__seq = tswap32(host_ip->__seq);
3825 #else
3826     target_ip->__seq = tswap16(host_ip->__seq);
3827 #endif
3828     unlock_user_struct(target_sd, target_addr, 1);
3829     return 0;
3830 }
3831 
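/* Convert a guest semid64_ds at target_addr into a host struct semid_ds,
 * including its embedded ipc_perm.
 */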
3832 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3833                                                abi_ulong target_addr)
3834 {
3835     struct target_semid64_ds *target_sd;
3836 
3837     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3838         return -TARGET_EFAULT;
3839     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3840         return -TARGET_EFAULT;
3841     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3842     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3843     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3844     unlock_user_struct(target_sd, target_addr, 0);
3845     return 0;
3846 }
3847 
3848 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3849                                                struct semid_ds *host_sd)
3850 {
3851     struct target_semid64_ds *target_sd;
3852 
3853     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3854         return -TARGET_EFAULT;
3855     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3856         return -TARGET_EFAULT;
3857     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3858     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3859     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3860     unlock_user_struct(target_sd, target_addr, 1);
3861     return 0;
3862 }
3863 
3864 struct target_seminfo {
3865     int semmap;
3866     int semmni;
3867     int semmns;
3868     int semmnu;
3869     int semmsl;
3870     int semopm;
3871     int semume;
3872     int semusz;
3873     int semvmx;
3874     int semaem;
3875 };
3876 
3877 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3878                                               struct seminfo *host_seminfo)
3879 {
3880     struct target_seminfo *target_seminfo;
3881     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3882         return -TARGET_EFAULT;
3883     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3884     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3885     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3886     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3887     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3888     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3889     __put_user(host_seminfo->semume, &target_seminfo->semume);
3890     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3891     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3892     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3893     unlock_user_struct(target_seminfo, target_addr, 1);
3894     return 0;
3895 }
3896 
3897 union semun {
3898     int val;
3899     struct semid_ds *buf;
3900     unsigned short *array;
3901     struct seminfo *__buf;
3902 };
3903 
3904 union target_semun {
3905     int val;
3906     abi_ulong buf;
3907     abi_ulong array;
3908     abi_ulong __buf;
3909 };
3910 
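/* For GETALL/SETALL: query the number of semaphores with IPC_STAT, allocate
 * a matching host array and fill it from the guest array at target_addr.
 * host_to_target_semarray() later writes the values back and frees it.
 */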
3911 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3912                                                abi_ulong target_addr)
3913 {
3914     int nsems;
3915     unsigned short *array;
3916     union semun semun;
3917     struct semid_ds semid_ds;
3918     int i, ret;
3919 
3920     semun.buf = &semid_ds;
3921 
3922     ret = semctl(semid, 0, IPC_STAT, semun);
3923     if (ret == -1)
3924         return get_errno(ret);
3925 
3926     nsems = semid_ds.sem_nsems;
3927 
3928     *host_array = g_try_new(unsigned short, nsems);
3929     if (!*host_array) {
3930         return -TARGET_ENOMEM;
3931     }
3932     array = lock_user(VERIFY_READ, target_addr,
3933                       nsems*sizeof(unsigned short), 1);
3934     if (!array) {
3935         g_free(*host_array);
3936         return -TARGET_EFAULT;
3937     }
3938 
3939     for(i=0; i<nsems; i++) {
3940         __get_user((*host_array)[i], &array[i]);
3941     }
3942     unlock_user(array, target_addr, 0);
3943 
3944     return 0;
3945 }
3946 
3947 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3948                                                unsigned short **host_array)
3949 {
3950     int nsems;
3951     unsigned short *array;
3952     union semun semun;
3953     struct semid_ds semid_ds;
3954     int i, ret;
3955 
3956     semun.buf = &semid_ds;
3957 
3958     ret = semctl(semid, 0, IPC_STAT, semun);
3959     if (ret == -1)
3960         return get_errno(ret);
3961 
3962     nsems = semid_ds.sem_nsems;
3963 
3964     array = lock_user(VERIFY_WRITE, target_addr,
3965                       nsems*sizeof(unsigned short), 0);
3966     if (!array)
3967         return -TARGET_EFAULT;
3968 
3969     for(i=0; i<nsems; i++) {
3970         __put_user((*host_array)[i], &array[i]);
3971     }
3972     g_free(*host_array);
3973     unlock_user(array, target_addr, 1);
3974 
3975     return 0;
3976 }
3977 
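/* do_semctl() must return target values and target errnos.  target_arg is
 * the guest's semun value, interpreted according to cmd.
 */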
3978 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3979                                  abi_ulong target_arg)
3980 {
3981     union target_semun target_su = { .buf = target_arg };
3982     union semun arg;
3983     struct semid_ds dsarg;
3984     unsigned short *array = NULL;
3985     struct seminfo seminfo;
3986     abi_long ret = -TARGET_EINVAL;
3987     abi_long err;
3988     cmd &= 0xff;
3989 
3990     switch (cmd) {
3991     case GETVAL:
3992     case SETVAL:
3993         /* In 64 bit cross-endian situations, we will erroneously pick up
3994          * the wrong half of the union for the "val" element.  To rectify
3995          * this, the entire 8-byte structure is byteswapped, followed by
3996          * a swap of the 4 byte val field. In other cases, the data is
3997          * already in proper host byte order. */
3998         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3999             target_su.buf = tswapal(target_su.buf);
4000             arg.val = tswap32(target_su.val);
4001         } else {
4002             arg.val = target_su.val;
4003         }
4004         ret = get_errno(semctl(semid, semnum, cmd, arg));
4005         break;
4006     case GETALL:
4007     case SETALL:
4008         err = target_to_host_semarray(semid, &array, target_su.array);
4009         if (err)
4010             return err;
4011         arg.array = array;
4012         ret = get_errno(semctl(semid, semnum, cmd, arg));
4013         err = host_to_target_semarray(semid, target_su.array, &array);
4014         if (err)
4015             return err;
4016         break;
4017     case IPC_STAT:
4018     case IPC_SET:
4019     case SEM_STAT:
4020         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4021         if (err)
4022             return err;
4023         arg.buf = &dsarg;
4024         ret = get_errno(semctl(semid, semnum, cmd, arg));
4025         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4026         if (err)
4027             return err;
4028         break;
4029     case IPC_INFO:
4030     case SEM_INFO:
4031         arg.__buf = &seminfo;
4032         ret = get_errno(semctl(semid, semnum, cmd, arg));
4033         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4034         if (err)
4035             return err;
4036         break;
4037     case IPC_RMID:
4038     case GETPID:
4039     case GETNCNT:
4040     case GETZCNT:
4041         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4042         break;
4043     }
4044 
4045     return ret;
4046 }
4047 
4048 struct target_sembuf {
4049     unsigned short sem_num;
4050     short sem_op;
4051     short sem_flg;
4052 };
4053 
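/* Copy an array of nsops guest sembuf structures into host form.  */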
4054 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4055                                              abi_ulong target_addr,
4056                                              unsigned nsops)
4057 {
4058     struct target_sembuf *target_sembuf;
4059     int i;
4060 
4061     target_sembuf = lock_user(VERIFY_READ, target_addr,
4062                               nsops*sizeof(struct target_sembuf), 1);
4063     if (!target_sembuf)
4064         return -TARGET_EFAULT;
4065 
4066     for(i=0; i<nsops; i++) {
4067         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4068         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4069         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4070     }
4071 
4072     unlock_user(target_sembuf, target_addr, 0);
4073 
4074     return 0;
4075 }
4076 
4077 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4078     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4079 
4080 /*
4081  * This macro is required to handle the s390 variants, which pass the
4082  * arguments in a different order than the default.
4083  */
4084 #ifdef __s390x__
4085 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4086   (__nsops), (__timeout), (__sops)
4087 #else
4088 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4089   (__nsops), 0, (__sops), (__timeout)
4090 #endif
4091 
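/* do_semtimedop() must return target values and target errnos.  It prefers
 * the host semtimedop syscall and falls back to the multiplexed ipc syscall
 * on hosts that only provide the latter.
 */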
4092 static inline abi_long do_semtimedop(int semid,
4093                                      abi_long ptr,
4094                                      unsigned nsops,
4095                                      abi_long timeout, bool time64)
4096 {
4097     struct sembuf *sops;
4098     struct timespec ts, *pts = NULL;
4099     abi_long ret;
4100 
4101     if (timeout) {
4102         pts = &ts;
4103         if (time64) {
4104             if (target_to_host_timespec64(pts, timeout)) {
4105                 return -TARGET_EFAULT;
4106             }
4107         } else {
4108             if (target_to_host_timespec(pts, timeout)) {
4109                 return -TARGET_EFAULT;
4110             }
4111         }
4112     }
4113 
4114     if (nsops > TARGET_SEMOPM) {
4115         return -TARGET_E2BIG;
4116     }
4117 
4118     sops = g_new(struct sembuf, nsops);
4119 
4120     if (target_to_host_sembuf(sops, ptr, nsops)) {
4121         g_free(sops);
4122         return -TARGET_EFAULT;
4123     }
4124 
4125     ret = -TARGET_ENOSYS;
4126 #ifdef __NR_semtimedop
4127     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4128 #endif
4129 #ifdef __NR_ipc
4130     if (ret == -TARGET_ENOSYS) {
4131         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4132                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4133     }
4134 #endif
4135     g_free(sops);
4136     return ret;
4137 }
4138 #endif
4139 
4140 struct target_msqid_ds
4141 {
4142     struct target_ipc_perm msg_perm;
4143     abi_ulong msg_stime;
4144 #if TARGET_ABI_BITS == 32
4145     abi_ulong __unused1;
4146 #endif
4147     abi_ulong msg_rtime;
4148 #if TARGET_ABI_BITS == 32
4149     abi_ulong __unused2;
4150 #endif
4151     abi_ulong msg_ctime;
4152 #if TARGET_ABI_BITS == 32
4153     abi_ulong __unused3;
4154 #endif
4155     abi_ulong __msg_cbytes;
4156     abi_ulong msg_qnum;
4157     abi_ulong msg_qbytes;
4158     abi_ulong msg_lspid;
4159     abi_ulong msg_lrpid;
4160     abi_ulong __unused4;
4161     abi_ulong __unused5;
4162 };
4163 
4164 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4165                                                abi_ulong target_addr)
4166 {
4167     struct target_msqid_ds *target_md;
4168 
4169     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4170         return -TARGET_EFAULT;
4171     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4172         return -TARGET_EFAULT;
4173     host_md->msg_stime = tswapal(target_md->msg_stime);
4174     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4175     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4176     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4177     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4178     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4179     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4180     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4181     unlock_user_struct(target_md, target_addr, 0);
4182     return 0;
4183 }
4184 
4185 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4186                                                struct msqid_ds *host_md)
4187 {
4188     struct target_msqid_ds *target_md;
4189 
4190     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4191         return -TARGET_EFAULT;
4192     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4193         return -TARGET_EFAULT;
4194     target_md->msg_stime = tswapal(host_md->msg_stime);
4195     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4196     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4197     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4198     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4199     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4200     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4201     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4202     unlock_user_struct(target_md, target_addr, 1);
4203     return 0;
4204 }
4205 
4206 struct target_msginfo {
4207     int msgpool;
4208     int msgmap;
4209     int msgmax;
4210     int msgmnb;
4211     int msgmni;
4212     int msgssz;
4213     int msgtql;
4214     unsigned short int msgseg;
4215 };
4216 
4217 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4218                                               struct msginfo *host_msginfo)
4219 {
4220     struct target_msginfo *target_msginfo;
4221     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4222         return -TARGET_EFAULT;
4223     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4224     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4225     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4226     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4227     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4228     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4229     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4230     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4231     unlock_user_struct(target_msginfo, target_addr, 1);
4232     return 0;
4233 }
4234 
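/* do_msgctl() must return target values and target errnos.  */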
4235 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4236 {
4237     struct msqid_ds dsarg;
4238     struct msginfo msginfo;
4239     abi_long ret = -TARGET_EINVAL;
4240 
4241     cmd &= 0xff;
4242 
4243     switch (cmd) {
4244     case IPC_STAT:
4245     case IPC_SET:
4246     case MSG_STAT:
4247         if (target_to_host_msqid_ds(&dsarg,ptr))
4248             return -TARGET_EFAULT;
4249         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4250         if (host_to_target_msqid_ds(ptr,&dsarg))
4251             return -TARGET_EFAULT;
4252         break;
4253     case IPC_RMID:
4254         ret = get_errno(msgctl(msgid, cmd, NULL));
4255         break;
4256     case IPC_INFO:
4257     case MSG_INFO:
4258         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4259         if (host_to_target_msginfo(ptr, &msginfo))
4260             return -TARGET_EFAULT;
4261         break;
4262     }
4263 
4264     return ret;
4265 }
4266 
4267 struct target_msgbuf {
4268     abi_long mtype;
4269     char mtext[1];
4270 };
4271 
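/* do_msgsnd() must return target values and target errnos.  The guest
 * msgbuf is copied into a host buffer so that mtype can be byte-swapped
 * before calling msgsnd (or the ipc fallback).
 */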
4272 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4273                                  ssize_t msgsz, int msgflg)
4274 {
4275     struct target_msgbuf *target_mb;
4276     struct msgbuf *host_mb;
4277     abi_long ret = 0;
4278 
4279     if (msgsz < 0) {
4280         return -TARGET_EINVAL;
4281     }
4282 
4283     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4284         return -TARGET_EFAULT;
4285     host_mb = g_try_malloc(msgsz + sizeof(long));
4286     if (!host_mb) {
4287         unlock_user_struct(target_mb, msgp, 0);
4288         return -TARGET_ENOMEM;
4289     }
4290     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4291     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4292     ret = -TARGET_ENOSYS;
4293 #ifdef __NR_msgsnd
4294     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4295 #endif
4296 #ifdef __NR_ipc
4297     if (ret == -TARGET_ENOSYS) {
4298 #ifdef __s390x__
4299         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4300                                  host_mb));
4301 #else
4302         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4303                                  host_mb, 0));
4304 #endif
4305     }
4306 #endif
4307     g_free(host_mb);
4308     unlock_user_struct(target_mb, msgp, 0);
4309 
4310     return ret;
4311 }
4312 
4313 #ifdef __NR_ipc
4314 #if defined(__sparc__)
4315 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4316 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4317 #elif defined(__s390x__)
4318 /* The s390 sys_ipc variant has only five parameters.  */
4319 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4320     ((long int[]){(long int)__msgp, __msgtyp})
4321 #else
4322 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4323     ((long int[]){(long int)__msgp, __msgtyp}), 0
4324 #endif
4325 #endif
4326 
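/* do_msgrcv() must return target values and target errnos.  On success the
 * received mtext is copied back to the guest buffer and mtype is stored in
 * guest byte order.
 */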
4327 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4328                                  ssize_t msgsz, abi_long msgtyp,
4329                                  int msgflg)
4330 {
4331     struct target_msgbuf *target_mb;
4332     char *target_mtext;
4333     struct msgbuf *host_mb;
4334     abi_long ret = 0;
4335 
4336     if (msgsz < 0) {
4337         return -TARGET_EINVAL;
4338     }
4339 
4340     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4341         return -TARGET_EFAULT;
4342 
4343     host_mb = g_try_malloc(msgsz + sizeof(long));
4344     if (!host_mb) {
4345         ret = -TARGET_ENOMEM;
4346         goto end;
4347     }
4348     ret = -TARGET_ENOSYS;
4349 #ifdef __NR_msgrcv
4350     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4351 #endif
4352 #ifdef __NR_ipc
4353     if (ret == -TARGET_ENOSYS) {
4354         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4355                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4356     }
4357 #endif
4358 
4359     if (ret > 0) {
4360         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4361         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4362         if (!target_mtext) {
4363             ret = -TARGET_EFAULT;
4364             goto end;
4365         }
4366         memcpy(target_mb->mtext, host_mb->mtext, ret);
4367         unlock_user(target_mtext, target_mtext_addr, ret);
4368     }
4369 
4370     target_mb->mtype = tswapal(host_mb->mtype);
4371 
4372 end:
4373     if (target_mb)
4374         unlock_user_struct(target_mb, msgp, 1);
4375     g_free(host_mb);
4376     return ret;
4377 }
4378 
4379 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4380                                                abi_ulong target_addr)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4385         return -TARGET_EFAULT;
4386     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4387         return -TARGET_EFAULT;
4388     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 0);
4396     return 0;
4397 }
4398 
4399 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4400                                                struct shmid_ds *host_sd)
4401 {
4402     struct target_shmid_ds *target_sd;
4403 
4404     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4405         return -TARGET_EFAULT;
4406     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4407         return -TARGET_EFAULT;
4408     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4409     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4410     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4411     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4412     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4413     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4414     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4415     unlock_user_struct(target_sd, target_addr, 1);
4416     return 0;
4417 }
4418 
4419 struct  target_shminfo {
4420     abi_ulong shmmax;
4421     abi_ulong shmmin;
4422     abi_ulong shmmni;
4423     abi_ulong shmseg;
4424     abi_ulong shmall;
4425 };
4426 
4427 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4428                                               struct shminfo *host_shminfo)
4429 {
4430     struct target_shminfo *target_shminfo;
4431     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4432         return -TARGET_EFAULT;
4433     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4434     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4435     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4436     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4437     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4438     unlock_user_struct(target_shminfo, target_addr, 1);
4439     return 0;
4440 }
4441 
4442 struct target_shm_info {
4443     int used_ids;
4444     abi_ulong shm_tot;
4445     abi_ulong shm_rss;
4446     abi_ulong shm_swp;
4447     abi_ulong swap_attempts;
4448     abi_ulong swap_successes;
4449 };
4450 
4451 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4452                                                struct shm_info *host_shm_info)
4453 {
4454     struct target_shm_info *target_shm_info;
4455     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4456         return -TARGET_EFAULT;
4457     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4458     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4459     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4460     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4461     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4462     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4463     unlock_user_struct(target_shm_info, target_addr, 1);
4464     return 0;
4465 }
4466 
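/* do_shmctl() must return target values and target errnos.  */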
4467 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4468 {
4469     struct shmid_ds dsarg;
4470     struct shminfo shminfo;
4471     struct shm_info shm_info;
4472     abi_long ret = -TARGET_EINVAL;
4473 
4474     cmd &= 0xff;
4475 
4476     switch (cmd) {
4477     case IPC_STAT:
4478     case IPC_SET:
4479     case SHM_STAT:
4480         if (target_to_host_shmid_ds(&dsarg, buf))
4481             return -TARGET_EFAULT;
4482         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4483         if (host_to_target_shmid_ds(buf, &dsarg))
4484             return -TARGET_EFAULT;
4485         break;
4486     case IPC_INFO:
4487         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4488         if (host_to_target_shminfo(buf, &shminfo))
4489             return -TARGET_EFAULT;
4490         break;
4491     case SHM_INFO:
4492         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4493         if (host_to_target_shm_info(buf, &shm_info))
4494             return -TARGET_EFAULT;
4495         break;
4496     case IPC_RMID:
4497     case SHM_LOCK:
4498     case SHM_UNLOCK:
4499         ret = get_errno(shmctl(shmid, cmd, NULL));
4500         break;
4501     }
4502 
4503     return ret;
4504 }
4505 
4506 #ifndef TARGET_FORCE_SHMLBA
4507 /* For most architectures, SHMLBA is the same as the page size;
4508  * some architectures have larger values, in which case they should
4509  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4510  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4511  * and defining its own value for SHMLBA.
4512  *
4513  * The kernel also permits SHMLBA to be set by the architecture to a
4514  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4515  * this means that addresses are rounded to the large size if
4516  * SHM_RND is set but addresses not aligned to that size are not rejected
4517  * as long as they are at least page-aligned. Since the only architecture
4518  * which uses this is ia64, this code doesn't provide for that oddity.
4519  */
4520 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4521 {
4522     return TARGET_PAGE_SIZE;
4523 }
4524 #endif
4525 
4526 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
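/* do_shmat() must return target values and target errnos.  The guest address
 * (rounded down to the target SHMLBA when SHM_RND is set) is attached with
 * the host shmat() and the mapping is recorded in shm_regions[] so that
 * do_shmdt() can undo the guest page flags later.
 */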
4527                                  int shmid, abi_ulong shmaddr, int shmflg)
4528 {
4529     CPUState *cpu = env_cpu(cpu_env);
4530     abi_long raddr;
4531     void *host_raddr;
4532     struct shmid_ds shm_info;
4533     int i, ret;
4534     abi_ulong shmlba;
4535 
4536     /* shmat pointers are always untagged */
4537 
4538     /* find out the length of the shared memory segment */
4539     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4540     if (is_error(ret)) {
4541         /* can't get length, bail out */
4542         return ret;
4543     }
4544 
4545     shmlba = target_shmlba(cpu_env);
4546 
4547     if (shmaddr & (shmlba - 1)) {
4548         if (shmflg & SHM_RND) {
4549             shmaddr &= ~(shmlba - 1);
4550         } else {
4551             return -TARGET_EINVAL;
4552         }
4553     }
4554     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4555         return -TARGET_EINVAL;
4556     }
4557 
4558     mmap_lock();
4559 
4560     /*
4561      * We're mapping shared memory, so ensure we generate code for parallel
4562      * execution and flush old translations.  This will work up to the level
4563      * supported by the host -- anything that requires EXCP_ATOMIC will not
4564      * be atomic with respect to an external process.
4565      */
4566     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4567         cpu->tcg_cflags |= CF_PARALLEL;
4568         tb_flush(cpu);
4569     }
4570 
4571     if (shmaddr)
4572         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4573     else {
4574         abi_ulong mmap_start;
4575 
4576         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4577         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4578 
4579         if (mmap_start == -1) {
4580             errno = ENOMEM;
4581             host_raddr = (void *)-1;
4582         } else
4583             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4584                                shmflg | SHM_REMAP);
4585     }
4586 
4587     if (host_raddr == (void *)-1) {
4588         mmap_unlock();
4589         return get_errno((long)host_raddr);
4590     }
4591     raddr = h2g((unsigned long)host_raddr);
4592 
4593     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4594                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4595                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4596 
4597     for (i = 0; i < N_SHM_REGIONS; i++) {
4598         if (!shm_regions[i].in_use) {
4599             shm_regions[i].in_use = true;
4600             shm_regions[i].start = raddr;
4601             shm_regions[i].size = shm_info.shm_segsz;
4602             break;
4603         }
4604     }
4605 
4606     mmap_unlock();
4607     return raddr;
4608 
4609 }
4610 
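/* do_shmdt() must return target values and target errnos.  It clears the
 * matching shm_regions[] entry and the guest page flags before calling the
 * host shmdt().
 */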
4611 static inline abi_long do_shmdt(abi_ulong shmaddr)
4612 {
4613     int i;
4614     abi_long rv;
4615 
4616     /* shmdt pointers are always untagged */
4617 
4618     mmap_lock();
4619 
4620     for (i = 0; i < N_SHM_REGIONS; ++i) {
4621         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4622             shm_regions[i].in_use = false;
4623             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4624             break;
4625         }
4626     }
4627     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4628 
4629     mmap_unlock();
4630 
4631     return rv;
4632 }
4633 
4634 #ifdef TARGET_NR_ipc
4635 /* ??? This only works with linear mappings.  */
4636 /* do_ipc() must return target values and target errnos. */
4637 static abi_long do_ipc(CPUArchState *cpu_env,
4638                        unsigned int call, abi_long first,
4639                        abi_long second, abi_long third,
4640                        abi_long ptr, abi_long fifth)
4641 {
4642     int version;
4643     abi_long ret = 0;
4644 
4645     version = call >> 16;
4646     call &= 0xffff;
4647 
4648     switch (call) {
4649     case IPCOP_semop:
4650         ret = do_semtimedop(first, ptr, second, 0, false);
4651         break;
4652     case IPCOP_semtimedop:
4653         /*
4654          * The s390 sys_ipc variant has only five parameters instead of six
4655          * (as in the default variant); the only difference is the handling
4656          * of SEMTIMEDOP, where s390 uses the third parameter as a pointer
4657          * to a struct timespec while the generic variant uses the fifth.
4658          */
4659 #if defined(TARGET_S390X)
4660         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4661 #else
4662         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4663 #endif
4664         break;
4665 
4666     case IPCOP_semget:
4667         ret = get_errno(semget(first, second, third));
4668         break;
4669 
4670     case IPCOP_semctl: {
4671         /* The semun argument to semctl is passed by value, so dereference the
4672          * ptr argument. */
4673         abi_ulong atptr;
4674         get_user_ual(atptr, ptr);
4675         ret = do_semctl(first, second, third, atptr);
4676         break;
4677     }
4678 
4679     case IPCOP_msgget:
4680         ret = get_errno(msgget(first, second));
4681         break;
4682 
4683     case IPCOP_msgsnd:
4684         ret = do_msgsnd(first, ptr, second, third);
4685         break;
4686 
4687     case IPCOP_msgctl:
4688         ret = do_msgctl(first, second, ptr);
4689         break;
4690 
4691     case IPCOP_msgrcv:
4692         switch (version) {
4693         case 0:
4694             {
4695                 struct target_ipc_kludge {
4696                     abi_long msgp;
4697                     abi_long msgtyp;
4698                 } *tmp;
4699 
4700                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4701                     ret = -TARGET_EFAULT;
4702                     break;
4703                 }
4704 
4705                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4706 
4707                 unlock_user_struct(tmp, ptr, 0);
4708                 break;
4709             }
4710         default:
4711             ret = do_msgrcv(first, ptr, second, fifth, third);
4712         }
4713         break;
4714 
4715     case IPCOP_shmat:
4716         switch (version) {
4717         default:
4718         {
4719             abi_ulong raddr;
4720             raddr = do_shmat(cpu_env, first, ptr, second);
4721             if (is_error(raddr))
4722                 return get_errno(raddr);
4723             if (put_user_ual(raddr, third))
4724                 return -TARGET_EFAULT;
4725             break;
4726         }
4727         case 1:
4728             ret = -TARGET_EINVAL;
4729             break;
4730         }
4731         break;
4732     case IPCOP_shmdt:
4733         ret = do_shmdt(ptr);
4734         break;
4735 
4736     case IPCOP_shmget:
4737         /* IPC_* flag values are the same on all linux platforms */
4738         ret = get_errno(shmget(first, second, third));
4739         break;
4740 
4741     /* IPC_* and SHM_* command values are the same on all linux platforms */
4742     case IPCOP_shmctl:
4743         ret = do_shmctl(first, second, ptr);
4744         break;
4745     default:
4746         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4747                       call, version);
4748         ret = -TARGET_ENOSYS;
4749         break;
4750     }
4751     return ret;
4752 }
4753 #endif
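
/*
 * Decoding sketch for the multiplexed ipc(2) entry point above: the guest
 * packs an ABI "version" into the high bits of the call number and the
 * IPCOP_* operation into the low 16 bits, which do_ipc() separates as
 *
 *     int version = call >> 16;
 *     call &= 0xffff;
 *
 * so a hypothetical raw value of (IPCOP_shmat | (1 << 16)) arrives as
 * version == 1 and is rejected by the IPCOP_shmat case with -TARGET_EINVAL.
 */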
4754 
4755 /* kernel structure types definitions */
4756 
4757 #define STRUCT(name, ...) STRUCT_ ## name,
4758 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4759 enum {
4760 #include "syscall_types.h"
4761 STRUCT_MAX
4762 };
4763 #undef STRUCT
4764 #undef STRUCT_SPECIAL
4765 
4766 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4767 #define STRUCT_SPECIAL(name)
4768 #include "syscall_types.h"
4769 #undef STRUCT
4770 #undef STRUCT_SPECIAL
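
/*
 * Expansion sketch for the two passes over syscall_types.h above, using a
 * hypothetical entry STRUCT(foo, TYPE_INT, TYPE_LONG):
 *
 *     enum { ..., STRUCT_foo, ..., STRUCT_MAX };                // 1st pass
 *     static const argtype struct_foo_def[] = {                 // 2nd pass
 *         TYPE_INT, TYPE_LONG, TYPE_NULL
 *     };
 *
 * STRUCT_SPECIAL() entries only get the enum value; their conversion is
 * written by hand elsewhere in this file.
 */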
4771 
4772 #define MAX_STRUCT_SIZE 4096
4773 
4774 #ifdef CONFIG_FIEMAP
4775 /* So fiemap access checks don't overflow on 32 bit systems.
4776  * This is very slightly smaller than the limit imposed by
4777  * the underlying kernel.
4778  */
4779 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4780                             / sizeof(struct fiemap_extent))
4781 
4782 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4783                                        int fd, int cmd, abi_long arg)
4784 {
4785     /* The parameter for this ioctl is a struct fiemap followed
4786      * by an array of struct fiemap_extent whose size is set
4787      * in fiemap->fm_extent_count. The array is filled in by the
4788      * ioctl.
4789      */
4790     int target_size_in, target_size_out;
4791     struct fiemap *fm;
4792     const argtype *arg_type = ie->arg_type;
4793     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4794     void *argptr, *p;
4795     abi_long ret;
4796     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4797     uint32_t outbufsz;
4798     int free_fm = 0;
4799 
4800     assert(arg_type[0] == TYPE_PTR);
4801     assert(ie->access == IOC_RW);
4802     arg_type++;
4803     target_size_in = thunk_type_size(arg_type, 0);
4804     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4805     if (!argptr) {
4806         return -TARGET_EFAULT;
4807     }
4808     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4809     unlock_user(argptr, arg, 0);
4810     fm = (struct fiemap *)buf_temp;
4811     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4812         return -TARGET_EINVAL;
4813     }
4814 
4815     outbufsz = sizeof (*fm) +
4816         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4817 
4818     if (outbufsz > MAX_STRUCT_SIZE) {
4819         /* We can't fit all the extents into the fixed size buffer.
4820          * Allocate one that is large enough and use it instead.
4821          */
4822         fm = g_try_malloc(outbufsz);
4823         if (!fm) {
4824             return -TARGET_ENOMEM;
4825         }
4826         memcpy(fm, buf_temp, sizeof(struct fiemap));
4827         free_fm = 1;
4828     }
4829     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4830     if (!is_error(ret)) {
4831         target_size_out = target_size_in;
4832         /* An extent_count of 0 means we were only counting the extents
4833          * so there are no structs to copy
4834          */
4835         if (fm->fm_extent_count != 0) {
4836             target_size_out += fm->fm_mapped_extents * extent_size;
4837         }
4838         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4839         if (!argptr) {
4840             ret = -TARGET_EFAULT;
4841         } else {
4842             /* Convert the struct fiemap */
4843             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4844             if (fm->fm_extent_count != 0) {
4845                 p = argptr + target_size_in;
4846                 /* ...and then all the struct fiemap_extents */
4847                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4848                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4849                                   THUNK_TARGET);
4850                     p += extent_size;
4851                 }
4852             }
4853             unlock_user(argptr, arg, target_size_out);
4854         }
4855     }
4856     if (free_fm) {
4857         g_free(fm);
4858     }
4859     return ret;
4860 }
4861 #endif
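
/*
 * Sizing sketch for the FIEMAP handler above (illustrative numbers): a
 * guest asking for fm_extent_count == 100 needs
 *
 *     outbufsz = sizeof(struct fiemap) + 100 * sizeof(struct fiemap_extent);
 *
 * which is larger than MAX_STRUCT_SIZE (4096) on typical hosts, so the
 * g_try_malloc() path is taken.  FIEMAP_MAX_EXTENTS only guards that
 * multiplication against 32-bit overflow rather than limiting normal use.
 */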
4862 
4863 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4864                                 int fd, int cmd, abi_long arg)
4865 {
4866     const argtype *arg_type = ie->arg_type;
4867     int target_size;
4868     void *argptr;
4869     int ret;
4870     struct ifconf *host_ifconf;
4871     uint32_t outbufsz;
4872     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4873     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4874     int target_ifreq_size;
4875     int nb_ifreq;
4876     int free_buf = 0;
4877     int i;
4878     int target_ifc_len;
4879     abi_long target_ifc_buf;
4880     int host_ifc_len;
4881     char *host_ifc_buf;
4882 
4883     assert(arg_type[0] == TYPE_PTR);
4884     assert(ie->access == IOC_RW);
4885 
4886     arg_type++;
4887     target_size = thunk_type_size(arg_type, 0);
4888 
4889     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4890     if (!argptr)
4891         return -TARGET_EFAULT;
4892     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4893     unlock_user(argptr, arg, 0);
4894 
4895     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4896     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4897     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4898 
4899     if (target_ifc_buf != 0) {
4900         target_ifc_len = host_ifconf->ifc_len;
4901         nb_ifreq = target_ifc_len / target_ifreq_size;
4902         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4903 
4904         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4905         if (outbufsz > MAX_STRUCT_SIZE) {
4906             /*
4907              * We can't fit all the ifreq entries into the fixed size buffer.
4908              * Allocate one that is large enough and use it instead.
4909              */
4910             host_ifconf = g_try_malloc(outbufsz);
4911             if (!host_ifconf) {
4912                 return -TARGET_ENOMEM;
4913             }
4914             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4915             free_buf = 1;
4916         }
4917         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4918 
4919         host_ifconf->ifc_len = host_ifc_len;
4920     } else {
4921         host_ifc_buf = NULL;
4922     }
4923     host_ifconf->ifc_buf = host_ifc_buf;
4924 
4925     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4926     if (!is_error(ret)) {
4927         /* convert host ifc_len to target ifc_len */
4928 
4929         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4930         target_ifc_len = nb_ifreq * target_ifreq_size;
4931         host_ifconf->ifc_len = target_ifc_len;
4932 
4933         /* restore target ifc_buf */
4934 
4935         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4936 
4937         /* copy struct ifconf to target user */
4938 
4939         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4940         if (!argptr)
4941             return -TARGET_EFAULT;
4942         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4943         unlock_user(argptr, arg, target_size);
4944 
4945         if (target_ifc_buf != 0) {
4946             /* copy ifreq[] to target user */
4947             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4948             for (i = 0; i < nb_ifreq ; i++) {
4949                 thunk_convert(argptr + i * target_ifreq_size,
4950                               host_ifc_buf + i * sizeof(struct ifreq),
4951                               ifreq_arg_type, THUNK_TARGET);
4952             }
4953             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4954         }
4955     }
4956 
4957     if (free_buf) {
4958         g_free(host_ifconf);
4959     }
4960 
4961     return ret;
4962 }
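
/*
 * Length conversion sketch for SIOCGIFCONF above (sizes are illustrative):
 * if the target's ifreq is 32 bytes and the host's is 40 bytes, a guest
 * buffer with ifc_len == 96 describes three entries, so the host is asked
 * for 3 * 40 == 120 bytes and the resulting length is scaled back:
 *
 *     nb_ifreq       = 96 / target_ifreq_size;                  // 3
 *     host_ifc_len   = nb_ifreq * sizeof(struct ifreq);         // 120
 *     ...
 *     target_ifc_len = (host_ifconf->ifc_len / sizeof(struct ifreq))
 *                      * target_ifreq_size;
 */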
4963 
4964 #if defined(CONFIG_USBFS)
4965 #if HOST_LONG_BITS > 64
4966 #error USBDEVFS thunks do not support >64 bit hosts yet.
4967 #endif
4968 struct live_urb {
4969     uint64_t target_urb_adr;
4970     uint64_t target_buf_adr;
4971     char *target_buf_ptr;
4972     struct usbdevfs_urb host_urb;
4973 };
4974 
4975 static GHashTable *usbdevfs_urb_hashtable(void)
4976 {
4977     static GHashTable *urb_hashtable;
4978 
4979     if (!urb_hashtable) {
4980         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4981     }
4982     return urb_hashtable;
4983 }
4984 
4985 static void urb_hashtable_insert(struct live_urb *urb)
4986 {
4987     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4988     g_hash_table_insert(urb_hashtable, urb, urb);
4989 }
4990 
4991 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4992 {
4993     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4994     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4995 }
4996 
4997 static void urb_hashtable_remove(struct live_urb *urb)
4998 {
4999     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5000     g_hash_table_remove(urb_hashtable, urb);
5001 }
5002 
5003 static abi_long
5004 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5005                           int fd, int cmd, abi_long arg)
5006 {
5007     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5008     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5009     struct live_urb *lurb;
5010     void *argptr;
5011     uint64_t hurb;
5012     int target_size;
5013     uintptr_t target_urb_adr;
5014     abi_long ret;
5015 
5016     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5017 
5018     memset(buf_temp, 0, sizeof(uint64_t));
5019     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5020     if (is_error(ret)) {
5021         return ret;
5022     }
5023 
5024     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5025     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5026     if (!lurb->target_urb_adr) {
5027         return -TARGET_EFAULT;
5028     }
5029     urb_hashtable_remove(lurb);
5030     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5031         lurb->host_urb.buffer_length);
5032     lurb->target_buf_ptr = NULL;
5033 
5034     /* restore the guest buffer pointer */
5035     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5036 
5037     /* update the guest urb struct */
5038     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5039     if (!argptr) {
5040         g_free(lurb);
5041         return -TARGET_EFAULT;
5042     }
5043     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5044     unlock_user(argptr, lurb->target_urb_adr, target_size);
5045 
5046     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5047     /* write back the urb handle */
5048     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5049     if (!argptr) {
5050         g_free(lurb);
5051         return -TARGET_EFAULT;
5052     }
5053 
5054     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5055     target_urb_adr = lurb->target_urb_adr;
5056     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5057     unlock_user(argptr, arg, target_size);
5058 
5059     g_free(lurb);
5060     return ret;
5061 }
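
/*
 * Pointer recovery sketch for the REAPURB handler above: the URB handle the
 * kernel hands back is the address of the embedded host_urb, and
 * container_of-style arithmetic recovers the wrapping live_urb that carries
 * the guest metadata:
 *
 *     uint64_t hurb;                       /* value written by the ioctl */
 *     struct live_urb *lurb =
 *         (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
 */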
5062 
5063 static abi_long
5064 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5065                              uint8_t *buf_temp __attribute__((unused)),
5066                              int fd, int cmd, abi_long arg)
5067 {
5068     struct live_urb *lurb;
5069 
5070     /* map target address back to host URB with metadata. */
5071     lurb = urb_hashtable_lookup(arg);
5072     if (!lurb) {
5073         return -TARGET_EFAULT;
5074     }
5075     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5076 }
5077 
5078 static abi_long
5079 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5080                             int fd, int cmd, abi_long arg)
5081 {
5082     const argtype *arg_type = ie->arg_type;
5083     int target_size;
5084     abi_long ret;
5085     void *argptr;
5086     int rw_dir;
5087     struct live_urb *lurb;
5088 
5089     /*
5090      * Each submitted URB needs to map to a unique ID for the
5091      * kernel, and that unique ID needs to be a pointer to
5092      * host memory.  Hence, we need to malloc for each URB.
5093      * Isochronous transfers have a variable-length struct.
5094      */
5095     arg_type++;
5096     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5097 
5098     /* construct host copy of urb and metadata */
5099     lurb = g_try_new0(struct live_urb, 1);
5100     if (!lurb) {
5101         return -TARGET_ENOMEM;
5102     }
5103 
5104     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5105     if (!argptr) {
5106         g_free(lurb);
5107         return -TARGET_EFAULT;
5108     }
5109     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5110     unlock_user(argptr, arg, 0);
5111 
5112     lurb->target_urb_adr = arg;
5113     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5114 
5115     /* buffer space used depends on endpoint type so lock the entire buffer */
5116     /* control type urbs should check the buffer contents for true direction */
5117     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5118     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5119         lurb->host_urb.buffer_length, 1);
5120     if (lurb->target_buf_ptr == NULL) {
5121         g_free(lurb);
5122         return -TARGET_EFAULT;
5123     }
5124 
5125     /* update buffer pointer in host copy */
5126     lurb->host_urb.buffer = lurb->target_buf_ptr;
5127 
5128     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5129     if (is_error(ret)) {
5130         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5131         g_free(lurb);
5132     } else {
5133         urb_hashtable_insert(lurb);
5134     }
5135 
5136     return ret;
5137 }
5138 #endif /* CONFIG_USBFS */
5139 
5140 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5141                             int cmd, abi_long arg)
5142 {
5143     void *argptr;
5144     struct dm_ioctl *host_dm;
5145     abi_long guest_data;
5146     uint32_t guest_data_size;
5147     int target_size;
5148     const argtype *arg_type = ie->arg_type;
5149     abi_long ret;
5150     void *big_buf = NULL;
5151     char *host_data;
5152 
5153     arg_type++;
5154     target_size = thunk_type_size(arg_type, 0);
5155     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5156     if (!argptr) {
5157         ret = -TARGET_EFAULT;
5158         goto out;
5159     }
5160     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5161     unlock_user(argptr, arg, 0);
5162 
5163     /* buf_temp is too small, so fetch things into a bigger buffer */
5164     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5165     memcpy(big_buf, buf_temp, target_size);
5166     buf_temp = big_buf;
5167     host_dm = big_buf;
5168 
5169     guest_data = arg + host_dm->data_start;
5170     if ((guest_data - arg) < 0) {
5171         ret = -TARGET_EINVAL;
5172         goto out;
5173     }
5174     guest_data_size = host_dm->data_size - host_dm->data_start;
5175     host_data = (char*)host_dm + host_dm->data_start;
5176 
5177     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5178     if (!argptr) {
5179         ret = -TARGET_EFAULT;
5180         goto out;
5181     }
5182 
5183     switch (ie->host_cmd) {
5184     case DM_REMOVE_ALL:
5185     case DM_LIST_DEVICES:
5186     case DM_DEV_CREATE:
5187     case DM_DEV_REMOVE:
5188     case DM_DEV_SUSPEND:
5189     case DM_DEV_STATUS:
5190     case DM_DEV_WAIT:
5191     case DM_TABLE_STATUS:
5192     case DM_TABLE_CLEAR:
5193     case DM_TABLE_DEPS:
5194     case DM_LIST_VERSIONS:
5195         /* no input data */
5196         break;
5197     case DM_DEV_RENAME:
5198     case DM_DEV_SET_GEOMETRY:
5199         /* data contains only strings */
5200         memcpy(host_data, argptr, guest_data_size);
5201         break;
5202     case DM_TARGET_MSG:
5203         memcpy(host_data, argptr, guest_data_size);
5204         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5205         break;
5206     case DM_TABLE_LOAD:
5207     {
5208         void *gspec = argptr;
5209         void *cur_data = host_data;
5210         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5211         int spec_size = thunk_type_size(arg_type, 0);
5212         int i;
5213 
5214         for (i = 0; i < host_dm->target_count; i++) {
5215             struct dm_target_spec *spec = cur_data;
5216             uint32_t next;
5217             int slen;
5218 
5219             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5220             slen = strlen((char*)gspec + spec_size) + 1;
5221             next = spec->next;
5222             spec->next = sizeof(*spec) + slen;
5223             strcpy((char*)&spec[1], gspec + spec_size);
5224             gspec += next;
5225             cur_data += spec->next;
5226         }
5227         break;
5228     }
5229     default:
5230         ret = -TARGET_EINVAL;
5231         unlock_user(argptr, guest_data, 0);
5232         goto out;
5233     }
5234     unlock_user(argptr, guest_data, 0);
5235 
5236     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5237     if (!is_error(ret)) {
5238         guest_data = arg + host_dm->data_start;
5239         guest_data_size = host_dm->data_size - host_dm->data_start;
5240         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5241         switch (ie->host_cmd) {
5242         case DM_REMOVE_ALL:
5243         case DM_DEV_CREATE:
5244         case DM_DEV_REMOVE:
5245         case DM_DEV_RENAME:
5246         case DM_DEV_SUSPEND:
5247         case DM_DEV_STATUS:
5248         case DM_TABLE_LOAD:
5249         case DM_TABLE_CLEAR:
5250         case DM_TARGET_MSG:
5251         case DM_DEV_SET_GEOMETRY:
5252             /* no return data */
5253             break;
5254         case DM_LIST_DEVICES:
5255         {
5256             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5257             uint32_t remaining_data = guest_data_size;
5258             void *cur_data = argptr;
5259             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5260             int nl_size = 12; /* can't use thunk_size due to alignment */
5261 
5262             while (1) {
5263                 uint32_t next = nl->next;
5264                 if (next) {
5265                     nl->next = nl_size + (strlen(nl->name) + 1);
5266                 }
5267                 if (remaining_data < nl->next) {
5268                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5269                     break;
5270                 }
5271                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5272                 strcpy(cur_data + nl_size, nl->name);
5273                 cur_data += nl->next;
5274                 remaining_data -= nl->next;
5275                 if (!next) {
5276                     break;
5277                 }
5278                 nl = (void*)nl + next;
5279             }
5280             break;
5281         }
5282         case DM_DEV_WAIT:
5283         case DM_TABLE_STATUS:
5284         {
5285             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5286             void *cur_data = argptr;
5287             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5288             int spec_size = thunk_type_size(arg_type, 0);
5289             int i;
5290 
5291             for (i = 0; i < host_dm->target_count; i++) {
5292                 uint32_t next = spec->next;
5293                 int slen = strlen((char*)&spec[1]) + 1;
5294                 spec->next = (cur_data - argptr) + spec_size + slen;
5295                 if (guest_data_size < spec->next) {
5296                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297                     break;
5298                 }
5299                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5300                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5301                 cur_data = argptr + spec->next;
5302                 spec = (void*)host_dm + host_dm->data_start + next;
5303             }
5304             break;
5305         }
5306         case DM_TABLE_DEPS:
5307         {
5308             void *hdata = (void*)host_dm + host_dm->data_start;
5309             int count = *(uint32_t*)hdata;
5310             uint64_t *hdev = hdata + 8;
5311             uint64_t *gdev = argptr + 8;
5312             int i;
5313 
5314             *(uint32_t*)argptr = tswap32(count);
5315             for (i = 0; i < count; i++) {
5316                 *gdev = tswap64(*hdev);
5317                 gdev++;
5318                 hdev++;
5319             }
5320             break;
5321         }
5322         case DM_LIST_VERSIONS:
5323         {
5324             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5325             uint32_t remaining_data = guest_data_size;
5326             void *cur_data = argptr;
5327             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5328             int vers_size = thunk_type_size(arg_type, 0);
5329 
5330             while (1) {
5331                 uint32_t next = vers->next;
5332                 if (next) {
5333                     vers->next = vers_size + (strlen(vers->name) + 1);
5334                 }
5335                 if (remaining_data < vers->next) {
5336                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5337                     break;
5338                 }
5339                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5340                 strcpy(cur_data + vers_size, vers->name);
5341                 cur_data += vers->next;
5342                 remaining_data -= vers->next;
5343                 if (!next) {
5344                     break;
5345                 }
5346                 vers = (void*)vers + next;
5347             }
5348             break;
5349         }
5350         default:
5351             unlock_user(argptr, guest_data, 0);
5352             ret = -TARGET_EINVAL;
5353             goto out;
5354         }
5355         unlock_user(argptr, guest_data, guest_data_size);
5356 
5357         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5358         if (!argptr) {
5359             ret = -TARGET_EFAULT;
5360             goto out;
5361         }
5362         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5363         unlock_user(argptr, arg, target_size);
5364     }
5365 out:
5366     g_free(big_buf);
5367     return ret;
5368 }
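
/*
 * Buffer layout assumed by the device-mapper handler above: a fixed
 * struct dm_ioctl header is followed by a variable payload that the header
 * itself locates and bounds, so both directions use
 *
 *     guest_data      = arg + host_dm->data_start;
 *     guest_data_size = host_dm->data_size - host_dm->data_start;
 *
 * which is also why big_buf is sized from data_size rather than from the
 * thunk's idea of sizeof(struct dm_ioctl).
 */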
5369 
5370 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5371                                int cmd, abi_long arg)
5372 {
5373     void *argptr;
5374     int target_size;
5375     const argtype *arg_type = ie->arg_type;
5376     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5377     abi_long ret;
5378 
5379     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5380     struct blkpg_partition host_part;
5381 
5382     /* Read and convert blkpg */
5383     arg_type++;
5384     target_size = thunk_type_size(arg_type, 0);
5385     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5386     if (!argptr) {
5387         ret = -TARGET_EFAULT;
5388         goto out;
5389     }
5390     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5391     unlock_user(argptr, arg, 0);
5392 
5393     switch (host_blkpg->op) {
5394     case BLKPG_ADD_PARTITION:
5395     case BLKPG_DEL_PARTITION:
5396         /* payload is struct blkpg_partition */
5397         break;
5398     default:
5399         /* Unknown opcode */
5400         ret = -TARGET_EINVAL;
5401         goto out;
5402     }
5403 
5404     /* Read and convert blkpg->data */
5405     arg = (abi_long)(uintptr_t)host_blkpg->data;
5406     target_size = thunk_type_size(part_arg_type, 0);
5407     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5408     if (!argptr) {
5409         ret = -TARGET_EFAULT;
5410         goto out;
5411     }
5412     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5413     unlock_user(argptr, arg, 0);
5414 
5415     /* Swizzle the data pointer to our local copy and call! */
5416     host_blkpg->data = &host_part;
5417     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5418 
5419 out:
5420     return ret;
5421 }
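
/*
 * Two-level conversion sketch for BLKPG above: the outer argument embeds a
 * guest pointer that must be followed and converted as well.  For a
 * hypothetical guest request such as
 *
 *     struct blkpg_ioctl_arg a = { .op = BLKPG_DEL_PARTITION, .data = &part };
 *     ioctl(fd, BLKPG, &a);
 *
 * the handler converts 'a' first, then converts 'part' into host_part and
 * points the host copy's data field at that local structure.
 */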
5422 
5423 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5424                                 int fd, int cmd, abi_long arg)
5425 {
5426     const argtype *arg_type = ie->arg_type;
5427     const StructEntry *se;
5428     const argtype *field_types;
5429     const int *dst_offsets, *src_offsets;
5430     int target_size;
5431     void *argptr;
5432     abi_ulong *target_rt_dev_ptr = NULL;
5433     unsigned long *host_rt_dev_ptr = NULL;
5434     abi_long ret;
5435     int i;
5436 
5437     assert(ie->access == IOC_W);
5438     assert(*arg_type == TYPE_PTR);
5439     arg_type++;
5440     assert(*arg_type == TYPE_STRUCT);
5441     target_size = thunk_type_size(arg_type, 0);
5442     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5443     if (!argptr) {
5444         return -TARGET_EFAULT;
5445     }
5446     arg_type++;
5447     assert(*arg_type == (int)STRUCT_rtentry);
5448     se = struct_entries + *arg_type++;
5449     assert(se->convert[0] == NULL);
5450     /* convert struct here to be able to catch rt_dev string */
5451     field_types = se->field_types;
5452     dst_offsets = se->field_offsets[THUNK_HOST];
5453     src_offsets = se->field_offsets[THUNK_TARGET];
5454     for (i = 0; i < se->nb_fields; i++) {
5455         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5456             assert(*field_types == TYPE_PTRVOID);
5457             target_rt_dev_ptr = argptr + src_offsets[i];
5458             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5459             if (*target_rt_dev_ptr != 0) {
5460                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5461                                                   tswapal(*target_rt_dev_ptr));
5462                 if (!*host_rt_dev_ptr) {
5463                     unlock_user(argptr, arg, 0);
5464                     return -TARGET_EFAULT;
5465                 }
5466             } else {
5467                 *host_rt_dev_ptr = 0;
5468             }
5469             field_types++;
5470             continue;
5471         }
5472         field_types = thunk_convert(buf_temp + dst_offsets[i],
5473                                     argptr + src_offsets[i],
5474                                     field_types, THUNK_HOST);
5475     }
5476     unlock_user(argptr, arg, 0);
5477 
5478     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5479 
5480     assert(host_rt_dev_ptr != NULL);
5481     assert(target_rt_dev_ptr != NULL);
5482     if (*host_rt_dev_ptr != 0) {
5483         unlock_user((void *)*host_rt_dev_ptr,
5484                     *target_rt_dev_ptr, 0);
5485     }
5486     return ret;
5487 }
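
/*
 * Field-by-field conversion sketch for the routing ioctls above: rt_dev is
 * the one member the generic thunk cannot handle because it points to a
 * NUL-terminated device name in guest memory, e.g. (guest side, purely
 * illustrative)
 *
 *     struct rtentry rt = { 0 };
 *     rt.rt_dev = "eth0";
 *     ioctl(fd, SIOCADDRT, &rt);
 *
 * so the loop special-cases that offset, locks the guest string and stores
 * the resulting host pointer before the host ioctl is issued.
 */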
5488 
5489 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5490                                      int fd, int cmd, abi_long arg)
5491 {
5492     int sig = target_to_host_signal(arg);
5493     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5494 }
5495 
5496 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5497                                     int fd, int cmd, abi_long arg)
5498 {
5499     struct timeval tv;
5500     abi_long ret;
5501 
5502     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5503     if (is_error(ret)) {
5504         return ret;
5505     }
5506 
5507     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5508         if (copy_to_user_timeval(arg, &tv)) {
5509             return -TARGET_EFAULT;
5510         }
5511     } else {
5512         if (copy_to_user_timeval64(arg, &tv)) {
5513             return -TARGET_EFAULT;
5514         }
5515     }
5516 
5517     return ret;
5518 }
5519 
5520 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5521                                       int fd, int cmd, abi_long arg)
5522 {
5523     struct timespec ts;
5524     abi_long ret;
5525 
5526     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5527     if (is_error(ret)) {
5528         return ret;
5529     }
5530 
5531     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5532         if (host_to_target_timespec(arg, &ts)) {
5533             return -TARGET_EFAULT;
5534         }
5535     } else {
5536         if (host_to_target_timespec64(arg, &ts)) {
5537             return -TARGET_EFAULT;
5538         }
5539     }
5540 
5541     return ret;
5542 }
5543 
5544 #ifdef TIOCGPTPEER
5545 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5546                                      int fd, int cmd, abi_long arg)
5547 {
5548     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5549     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5550 }
5551 #endif
5552 
5553 #ifdef HAVE_DRM_H
5554 
5555 static void unlock_drm_version(struct drm_version *host_ver,
5556                                struct target_drm_version *target_ver,
5557                                bool copy)
5558 {
5559     unlock_user(host_ver->name, target_ver->name,
5560                                 copy ? host_ver->name_len : 0);
5561     unlock_user(host_ver->date, target_ver->date,
5562                                 copy ? host_ver->date_len : 0);
5563     unlock_user(host_ver->desc, target_ver->desc,
5564                                 copy ? host_ver->desc_len : 0);
5565 }
5566 
5567 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5568                                           struct target_drm_version *target_ver)
5569 {
5570     memset(host_ver, 0, sizeof(*host_ver));
5571 
5572     __get_user(host_ver->name_len, &target_ver->name_len);
5573     if (host_ver->name_len) {
5574         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5575                                    target_ver->name_len, 0);
5576         if (!host_ver->name) {
5577             return -EFAULT;
5578         }
5579     }
5580 
5581     __get_user(host_ver->date_len, &target_ver->date_len);
5582     if (host_ver->date_len) {
5583         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5584                                    target_ver->date_len, 0);
5585         if (!host_ver->date) {
5586             goto err;
5587         }
5588     }
5589 
5590     __get_user(host_ver->desc_len, &target_ver->desc_len);
5591     if (host_ver->desc_len) {
5592         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5593                                    target_ver->desc_len, 0);
5594         if (!host_ver->desc) {
5595             goto err;
5596         }
5597     }
5598 
5599     return 0;
5600 err:
5601     unlock_drm_version(host_ver, target_ver, false);
5602     return -EFAULT;
5603 }
5604 
5605 static inline void host_to_target_drmversion(
5606                                           struct target_drm_version *target_ver,
5607                                           struct drm_version *host_ver)
5608 {
5609     __put_user(host_ver->version_major, &target_ver->version_major);
5610     __put_user(host_ver->version_minor, &target_ver->version_minor);
5611     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5612     __put_user(host_ver->name_len, &target_ver->name_len);
5613     __put_user(host_ver->date_len, &target_ver->date_len);
5614     __put_user(host_ver->desc_len, &target_ver->desc_len);
5615     unlock_drm_version(host_ver, target_ver, true);
5616 }
5617 
5618 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5619                              int fd, int cmd, abi_long arg)
5620 {
5621     struct drm_version *ver;
5622     struct target_drm_version *target_ver;
5623     abi_long ret;
5624 
5625     switch (ie->host_cmd) {
5626     case DRM_IOCTL_VERSION:
5627         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5628             return -TARGET_EFAULT;
5629         }
5630         ver = (struct drm_version *)buf_temp;
5631         ret = target_to_host_drmversion(ver, target_ver);
5632         if (!is_error(ret)) {
5633             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5634             if (is_error(ret)) {
5635                 unlock_drm_version(ver, target_ver, false);
5636             } else {
5637                 host_to_target_drmversion(target_ver, ver);
5638             }
5639         }
5640         unlock_user_struct(target_ver, arg, 0);
5641         return ret;
5642     }
5643     return -TARGET_ENOSYS;
5644 }
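
/*
 * Typical guest usage of DRM_IOCTL_VERSION handled above (illustrative):
 * user space normally issues the ioctl twice, first with zero-length
 * buffers to learn the string lengths, then again with buffers of that
 * size:
 *
 *     struct drm_version v = { 0 };
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);            // fills *_len only
 *     v.name = malloc(v.name_len);                 // likewise date/desc
 *     ioctl(fd, DRM_IOCTL_VERSION, &v);
 *
 * which is why target_to_host_drmversion() only locks buffers whose length
 * is non-zero.
 */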
5645 
5646 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5647                                            struct drm_i915_getparam *gparam,
5648                                            int fd, abi_long arg)
5649 {
5650     abi_long ret;
5651     int value;
5652     struct target_drm_i915_getparam *target_gparam;
5653 
5654     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5655         return -TARGET_EFAULT;
5656     }
5657 
5658     __get_user(gparam->param, &target_gparam->param);
5659     gparam->value = &value;
5660     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5661     put_user_s32(value, target_gparam->value);
5662 
5663     unlock_user_struct(target_gparam, arg, 0);
5664     return ret;
5665 }
5666 
5667 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5668                                   int fd, int cmd, abi_long arg)
5669 {
5670     switch (ie->host_cmd) {
5671     case DRM_IOCTL_I915_GETPARAM:
5672         return do_ioctl_drm_i915_getparam(ie,
5673                                           (struct drm_i915_getparam *)buf_temp,
5674                                           fd, arg);
5675     default:
5676         return -TARGET_ENOSYS;
5677     }
5678 }
5679 
5680 #endif
5681 
5682 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5683                                         int fd, int cmd, abi_long arg)
5684 {
5685     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5686     struct tun_filter *target_filter;
5687     char *target_addr;
5688 
5689     assert(ie->access == IOC_W);
5690 
5691     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5692     if (!target_filter) {
5693         return -TARGET_EFAULT;
5694     }
5695     filter->flags = tswap16(target_filter->flags);
5696     filter->count = tswap16(target_filter->count);
5697     unlock_user(target_filter, arg, 0);
5698 
5699     if (filter->count) {
5700         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5701             MAX_STRUCT_SIZE) {
5702             return -TARGET_EFAULT;
5703         }
5704 
5705         target_addr = lock_user(VERIFY_READ,
5706                                 arg + offsetof(struct tun_filter, addr),
5707                                 filter->count * ETH_ALEN, 1);
5708         if (!target_addr) {
5709             return -TARGET_EFAULT;
5710         }
5711         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5712         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5713     }
5714 
5715     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5716 }
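
/*
 * Layout assumed by the TUNSETTXFILTER handler above: struct tun_filter is
 * a short fixed header (flags, count) followed by 'count' MAC addresses, so
 * the guest copy is read in two steps, the header first and then
 *
 *     filter->count * ETH_ALEN                     // e.g. 12 bytes for 2
 *
 * bytes starting at offsetof(struct tun_filter, addr).
 */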
5717 
5718 IOCTLEntry ioctl_entries[] = {
5719 #define IOCTL(cmd, access, ...) \
5720     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5721 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5722     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5723 #define IOCTL_IGNORE(cmd) \
5724     { TARGET_ ## cmd, 0, #cmd },
5725 #include "ioctls.h"
5726     { 0, 0, },
5727 };
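
/*
 * Expansion sketch for the ioctls.h table above, using a hypothetical entry
 * IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT)):
 *
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0,
 *       { MK_PTR(TYPE_INT) } },
 *
 * IOCTL_SPECIAL() additionally fills the do_ioctl callback slot, and
 * IOCTL_IGNORE() leaves host_cmd zero so do_ioctl() below returns
 * -TARGET_ENOSYS for the command.
 */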
5728 
5729 /* ??? Implement proper locking for ioctls.  */
5730 /* do_ioctl() must return target values and target errnos. */
5731 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5732 {
5733     const IOCTLEntry *ie;
5734     const argtype *arg_type;
5735     abi_long ret;
5736     uint8_t buf_temp[MAX_STRUCT_SIZE];
5737     int target_size;
5738     void *argptr;
5739 
5740     ie = ioctl_entries;
5741     for (;;) {
5742         if (ie->target_cmd == 0) {
5743             qemu_log_mask(
5744                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5745             return -TARGET_ENOSYS;
5746         }
5747         if (ie->target_cmd == cmd)
5748             break;
5749         ie++;
5750     }
5751     arg_type = ie->arg_type;
5752     if (ie->do_ioctl) {
5753         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5754     } else if (!ie->host_cmd) {
5755         /* Some architectures define BSD ioctls in their headers
5756            that are not implemented in Linux.  */
5757         return -TARGET_ENOSYS;
5758     }
5759 
5760     switch (arg_type[0]) {
5761     case TYPE_NULL:
5762         /* no argument */
5763         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5764         break;
5765     case TYPE_PTRVOID:
5766     case TYPE_INT:
5767     case TYPE_LONG:
5768     case TYPE_ULONG:
5769         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5770         break;
5771     case TYPE_PTR:
5772         arg_type++;
5773         target_size = thunk_type_size(arg_type, 0);
5774         switch (ie->access) {
5775         case IOC_R:
5776             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5777             if (!is_error(ret)) {
5778                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5779                 if (!argptr)
5780                     return -TARGET_EFAULT;
5781                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5782                 unlock_user(argptr, arg, target_size);
5783             }
5784             break;
5785         case IOC_W:
5786             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5787             if (!argptr)
5788                 return -TARGET_EFAULT;
5789             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5790             unlock_user(argptr, arg, 0);
5791             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5792             break;
5793         default:
5794         case IOC_RW:
5795             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5796             if (!argptr)
5797                 return -TARGET_EFAULT;
5798             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5799             unlock_user(argptr, arg, 0);
5800             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5801             if (!is_error(ret)) {
5802                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5803                 if (!argptr)
5804                     return -TARGET_EFAULT;
5805                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5806                 unlock_user(argptr, arg, target_size);
5807             }
5808             break;
5809         }
5810         break;
5811     default:
5812         qemu_log_mask(LOG_UNIMP,
5813                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5814                       (long)cmd, arg_type[0]);
5815         ret = -TARGET_ENOSYS;
5816         break;
5817     }
5818     return ret;
5819 }
5820 
5821 static const bitmask_transtbl iflag_tbl[] = {
5822         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5823         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5824         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5825         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5826         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5827         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5828         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5829         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5830         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5831         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5832         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5833         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5834         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5835         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5836         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5837         { 0, 0, 0, 0 }
5838 };
5839 
5840 static const bitmask_transtbl oflag_tbl[] = {
5841 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5842 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5843 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5844 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5845 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5846 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5847 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5848 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5849 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5850 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5851 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5852 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5853 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5854 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5855 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5856 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5857 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5858 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5859 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5860 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5861 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5862 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5863 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5864 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5865 	{ 0, 0, 0, 0 }
5866 };
5867 
5868 static const bitmask_transtbl cflag_tbl[] = {
5869 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5870 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5871 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5872 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5873 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5874 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5875 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5876 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5877 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5878 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5879 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5880 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5881 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5882 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5883 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5884 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5885 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5886 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5887 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5888 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5889 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5890 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5891 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5892 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5893 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5894 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5895 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5896 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5897 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5898 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5899 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5900 	{ 0, 0, 0, 0 }
5901 };
5902 
5903 static const bitmask_transtbl lflag_tbl[] = {
5904   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5905   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5906   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5907   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5908   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5909   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5910   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5911   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5912   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5913   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5914   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5915   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5916   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5917   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5918   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5919   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5920   { 0, 0, 0, 0 }
5921 };
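
/*
 * Entry semantics for the four termios tables above: each row is
 * { target_mask, target_bits, host_mask, host_bits }.  For a single-bit
 * flag the mask equals the bits (ICANON maps 1:1); for a multi-bit field
 * such as CSIZE the mask selects the field and the bits name one value of
 * it, so a conversion like
 *
 *     target_to_host_bitmask(TARGET_CS8, cflag_tbl)
 *
 * yields CS8 on the host even if the two ABIs encode the field differently.
 */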
5922 
5923 static void target_to_host_termios (void *dst, const void *src)
5924 {
5925     struct host_termios *host = dst;
5926     const struct target_termios *target = src;
5927 
5928     host->c_iflag =
5929         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5930     host->c_oflag =
5931         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5932     host->c_cflag =
5933         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5934     host->c_lflag =
5935         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5936     host->c_line = target->c_line;
5937 
5938     memset(host->c_cc, 0, sizeof(host->c_cc));
5939     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5940     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5941     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5942     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5943     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5944     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5945     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5946     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5947     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5948     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5949     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5950     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5951     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5952     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5953     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5954     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5955     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5956 }
5957 
5958 static void host_to_target_termios (void *dst, const void *src)
5959 {
5960     struct target_termios *target = dst;
5961     const struct host_termios *host = src;
5962 
5963     target->c_iflag =
5964         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5965     target->c_oflag =
5966         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5967     target->c_cflag =
5968         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5969     target->c_lflag =
5970         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5971     target->c_line = host->c_line;
5972 
5973     memset(target->c_cc, 0, sizeof(target->c_cc));
5974     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5975     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5976     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5977     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5978     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5979     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5980     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5981     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5982     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5983     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5984     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5985     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5986     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5987     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5988     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5989     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5990     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5991 }
5992 
5993 static const StructEntry struct_termios_def = {
5994     .convert = { host_to_target_termios, target_to_host_termios },
5995     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5996     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5997     .print = print_termios,
5998 };
5999 
6000 static const bitmask_transtbl mmap_flags_tbl[] = {
6001     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6002     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6003     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6004     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6005       MAP_ANONYMOUS, MAP_ANONYMOUS },
6006     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6007       MAP_GROWSDOWN, MAP_GROWSDOWN },
6008     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6009       MAP_DENYWRITE, MAP_DENYWRITE },
6010     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6011       MAP_EXECUTABLE, MAP_EXECUTABLE },
6012     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6013     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6014       MAP_NORESERVE, MAP_NORESERVE },
6015     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6016     /* MAP_STACK had been ignored by the kernel for quite some time.
6017        Recognize it for the target insofar as we do not want to pass
6018        it through to the host.  */
6019     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6020     { 0, 0, 0, 0 }
6021 };
6022 
6023 /*
6024  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6025  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6026  */
6027 #if defined(TARGET_I386)
6028 
6029 /* NOTE: there is really one LDT for all the threads */
6030 static uint8_t *ldt_table;
6031 
6032 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6033 {
6034     int size;
6035     void *p;
6036 
6037     if (!ldt_table)
6038         return 0;
6039     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6040     if (size > bytecount)
6041         size = bytecount;
6042     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6043     if (!p)
6044         return -TARGET_EFAULT;
6045     /* ??? Should this be byteswapped?  */
6046     memcpy(p, ldt_table, size);
6047     unlock_user(p, ptr, size);
6048     return size;
6049 }
6050 
6051 /* XXX: add locking support */
6052 static abi_long write_ldt(CPUX86State *env,
6053                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6054 {
6055     struct target_modify_ldt_ldt_s ldt_info;
6056     struct target_modify_ldt_ldt_s *target_ldt_info;
6057     int seg_32bit, contents, read_exec_only, limit_in_pages;
6058     int seg_not_present, useable, lm;
6059     uint32_t *lp, entry_1, entry_2;
6060 
6061     if (bytecount != sizeof(ldt_info))
6062         return -TARGET_EINVAL;
6063     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6064         return -TARGET_EFAULT;
6065     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6066     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6067     ldt_info.limit = tswap32(target_ldt_info->limit);
6068     ldt_info.flags = tswap32(target_ldt_info->flags);
6069     unlock_user_struct(target_ldt_info, ptr, 0);
6070 
6071     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6072         return -TARGET_EINVAL;
6073     seg_32bit = ldt_info.flags & 1;
6074     contents = (ldt_info.flags >> 1) & 3;
6075     read_exec_only = (ldt_info.flags >> 3) & 1;
6076     limit_in_pages = (ldt_info.flags >> 4) & 1;
6077     seg_not_present = (ldt_info.flags >> 5) & 1;
6078     useable = (ldt_info.flags >> 6) & 1;
6079 #ifdef TARGET_ABI32
6080     lm = 0;
6081 #else
6082     lm = (ldt_info.flags >> 7) & 1;
6083 #endif
6084     if (contents == 3) {
6085         if (oldmode)
6086             return -TARGET_EINVAL;
6087         if (seg_not_present == 0)
6088             return -TARGET_EINVAL;
6089     }
6090     /* allocate the LDT */
6091     if (!ldt_table) {
6092         env->ldt.base = target_mmap(0,
6093                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6094                                     PROT_READ|PROT_WRITE,
6095                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6096         if (env->ldt.base == -1)
6097             return -TARGET_ENOMEM;
6098         memset(g2h_untagged(env->ldt.base), 0,
6099                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6100         env->ldt.limit = 0xffff;
6101         ldt_table = g2h_untagged(env->ldt.base);
6102     }
6103 
6104     /* NOTE: same code as Linux kernel */
6105     /* Allow LDTs to be cleared by the user. */
6106     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6107         if (oldmode ||
6108             (contents == 0            &&
6109              read_exec_only == 1      &&
6110              seg_32bit == 0           &&
6111              limit_in_pages == 0      &&
6112              seg_not_present == 1     &&
6113              useable == 0)) {
6114             entry_1 = 0;
6115             entry_2 = 0;
6116             goto install;
6117         }
6118     }
6119 
6120     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6121         (ldt_info.limit & 0x0ffff);
6122     entry_2 = (ldt_info.base_addr & 0xff000000) |
6123         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6124         (ldt_info.limit & 0xf0000) |
6125         ((read_exec_only ^ 1) << 9) |
6126         (contents << 10) |
6127         ((seg_not_present ^ 1) << 15) |
6128         (seg_32bit << 22) |
6129         (limit_in_pages << 23) |
6130         (lm << 21) |
6131         0x7000;
6132     if (!oldmode)
6133         entry_2 |= (useable << 20);
6134 
6135     /* Install the new entry ...  */
6136 install:
6137     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6138     lp[0] = tswap32(entry_1);
6139     lp[1] = tswap32(entry_2);
6140     return 0;
6141 }
6142 
6143 /* specific and weird i386 syscalls */
6144 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6145                               unsigned long bytecount)
6146 {
6147     abi_long ret;
6148 
6149     switch (func) {
6150     case 0:
6151         ret = read_ldt(ptr, bytecount);
6152         break;
6153     case 1:
6154         ret = write_ldt(env, ptr, bytecount, 1);
6155         break;
6156     case 0x11:
6157         ret = write_ldt(env, ptr, bytecount, 0);
6158         break;
6159     default:
6160         ret = -TARGET_ENOSYS;
6161         break;
6162     }
6163     return ret;
6164 }
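/*
 * Illustrative note (not part of the original source): a 32-bit guest
 * normally reaches write_ldt() through the modify_ldt syscall with a
 * struct user_desc payload.  A hedged, guest-side sketch following the
 * Linux modify_ldt(2) interface (tls_block is a hypothetical buffer):
 *
 *     struct user_desc ud = {
 *         .entry_number    = 0,
 *         .base_addr       = (unsigned long)tls_block,
 *         .limit           = 0xfffff,
 *         .seg_32bit       = 1,
 *         .contents        = 0,            // data segment
 *         .read_exec_only  = 0,
 *         .limit_in_pages  = 1,
 *         .seg_not_present = 0,
 *         .useable         = 1,
 *     };
 *     syscall(SYS_modify_ldt, 1, &ud, sizeof(ud));    // func 1: legacy write
 *     syscall(SYS_modify_ldt, 0x11, &ud, sizeof(ud)); // new-mode write, honours "useable"
 *
 * func 0 reads the raw descriptor table back via read_ldt().
 */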
6165 
6166 #if defined(TARGET_ABI32)
6167 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6168 {
6169     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6170     struct target_modify_ldt_ldt_s ldt_info;
6171     struct target_modify_ldt_ldt_s *target_ldt_info;
6172     int seg_32bit, contents, read_exec_only, limit_in_pages;
6173     int seg_not_present, useable, lm;
6174     uint32_t *lp, entry_1, entry_2;
6175     int i;
6176 
6177     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6178     if (!target_ldt_info)
6179         return -TARGET_EFAULT;
6180     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6181     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6182     ldt_info.limit = tswap32(target_ldt_info->limit);
6183     ldt_info.flags = tswap32(target_ldt_info->flags);
6184     if (ldt_info.entry_number == -1) {
6185         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6186             if (gdt_table[i] == 0) {
6187                 ldt_info.entry_number = i;
6188                 target_ldt_info->entry_number = tswap32(i);
6189                 break;
6190             }
6191         }
6192     }
6193     unlock_user_struct(target_ldt_info, ptr, 1);
6194 
6195     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6196         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6197            return -TARGET_EINVAL;
6198     seg_32bit = ldt_info.flags & 1;
6199     contents = (ldt_info.flags >> 1) & 3;
6200     read_exec_only = (ldt_info.flags >> 3) & 1;
6201     limit_in_pages = (ldt_info.flags >> 4) & 1;
6202     seg_not_present = (ldt_info.flags >> 5) & 1;
6203     useable = (ldt_info.flags >> 6) & 1;
6204 #ifdef TARGET_ABI32
6205     lm = 0;
6206 #else
6207     lm = (ldt_info.flags >> 7) & 1;
6208 #endif
6209 
6210     if (contents == 3) {
6211         if (seg_not_present == 0)
6212             return -TARGET_EINVAL;
6213     }
6214 
6215     /* NOTE: same code as Linux kernel */
6216     /* Allow LDTs to be cleared by the user. */
6217     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6218         if ((contents == 0             &&
6219              read_exec_only == 1       &&
6220              seg_32bit == 0            &&
6221              limit_in_pages == 0       &&
6222              seg_not_present == 1      &&
6223              useable == 0 )) {
6224             entry_1 = 0;
6225             entry_2 = 0;
6226             goto install;
6227         }
6228     }
6229 
6230     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6231         (ldt_info.limit & 0x0ffff);
6232     entry_2 = (ldt_info.base_addr & 0xff000000) |
6233         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6234         (ldt_info.limit & 0xf0000) |
6235         ((read_exec_only ^ 1) << 9) |
6236         (contents << 10) |
6237         ((seg_not_present ^ 1) << 15) |
6238         (seg_32bit << 22) |
6239         (limit_in_pages << 23) |
6240         (useable << 20) |
6241         (lm << 21) |
6242         0x7000;
6243 
6244     /* Install the new entry ...  */
6245 install:
6246     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6247     lp[0] = tswap32(entry_1);
6248     lp[1] = tswap32(entry_2);
6249     return 0;
6250 }
6251 
6252 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6253 {
6254     struct target_modify_ldt_ldt_s *target_ldt_info;
6255     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6256     uint32_t base_addr, limit, flags;
6257     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6258     int seg_not_present, useable, lm;
6259     uint32_t *lp, entry_1, entry_2;
6260 
6261     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6262     if (!target_ldt_info)
6263         return -TARGET_EFAULT;
6264     idx = tswap32(target_ldt_info->entry_number);
6265     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6266         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6267         unlock_user_struct(target_ldt_info, ptr, 1);
6268         return -TARGET_EINVAL;
6269     }
6270     lp = (uint32_t *)(gdt_table + idx);
6271     entry_1 = tswap32(lp[0]);
6272     entry_2 = tswap32(lp[1]);
6273 
6274     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6275     contents = (entry_2 >> 10) & 3;
6276     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6277     seg_32bit = (entry_2 >> 22) & 1;
6278     limit_in_pages = (entry_2 >> 23) & 1;
6279     useable = (entry_2 >> 20) & 1;
6280 #ifdef TARGET_ABI32
6281     lm = 0;
6282 #else
6283     lm = (entry_2 >> 21) & 1;
6284 #endif
6285     flags = (seg_32bit << 0) | (contents << 1) |
6286         (read_exec_only << 3) | (limit_in_pages << 4) |
6287         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6288     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6289     base_addr = (entry_1 >> 16) |
6290         (entry_2 & 0xff000000) |
6291         ((entry_2 & 0xff) << 16);
6292     target_ldt_info->base_addr = tswapal(base_addr);
6293     target_ldt_info->limit = tswap32(limit);
6294     target_ldt_info->flags = tswap32(flags);
6295     unlock_user_struct(target_ldt_info, ptr, 1);
6296     return 0;
6297 }
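/*
 * Illustrative note (not part of the original source): 32-bit x86 guests
 * set up TLS with set_thread_area, passing entry_number == -1 so that a
 * free GDT TLS slot is chosen for them by the search loop in
 * do_set_thread_area() above.  A hedged guest-side sketch:
 *
 *     struct user_desc ud = { .entry_number = -1, .base_addr = ..., ... };
 *     syscall(SYS_set_thread_area, &ud);
 *     // ud.entry_number now holds the allocated slot; the guest loads the
 *     // selector (ud.entry_number << 3) | 3 into %gs to use it.
 *     syscall(SYS_get_thread_area, &ud);   // reads the installed entry back
 */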
6298 
6299 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6300 {
6301     return -TARGET_ENOSYS;
6302 }
6303 #else
6304 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6305 {
6306     abi_long ret = 0;
6307     abi_ulong val;
6308     int idx;
6309 
6310     switch(code) {
6311     case TARGET_ARCH_SET_GS:
6312     case TARGET_ARCH_SET_FS:
6313         if (code == TARGET_ARCH_SET_GS)
6314             idx = R_GS;
6315         else
6316             idx = R_FS;
6317         cpu_x86_load_seg(env, idx, 0);
6318         env->segs[idx].base = addr;
6319         break;
6320     case TARGET_ARCH_GET_GS:
6321     case TARGET_ARCH_GET_FS:
6322         if (code == TARGET_ARCH_GET_GS)
6323             idx = R_GS;
6324         else
6325             idx = R_FS;
6326         val = env->segs[idx].base;
6327         if (put_user(val, addr, abi_ulong))
6328             ret = -TARGET_EFAULT;
6329         break;
6330     default:
6331         ret = -TARGET_EINVAL;
6332         break;
6333     }
6334     return ret;
6335 }
6336 #endif /* defined(TARGET_ABI32) */
6337 #endif /* defined(TARGET_I386) */
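/*
 * Illustrative note (not part of the original source): 64-bit x86 guests
 * set the FS/GS segment bases directly with arch_prctl instead of going
 * through descriptor tables.  A hedged guest-side sketch of what
 * do_arch_prctl() above emulates (tls_block is a hypothetical buffer):
 *
 *     syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *     unsigned long base;
 *     syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 */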
6338 
6339 /*
6340  * These constants are generic.  Supply any that are missing from the host.
6341  */
6342 #ifndef PR_SET_NAME
6343 # define PR_SET_NAME    15
6344 # define PR_GET_NAME    16
6345 #endif
6346 #ifndef PR_SET_FP_MODE
6347 # define PR_SET_FP_MODE 45
6348 # define PR_GET_FP_MODE 46
6349 # define PR_FP_MODE_FR   (1 << 0)
6350 # define PR_FP_MODE_FRE  (1 << 1)
6351 #endif
6352 #ifndef PR_SVE_SET_VL
6353 # define PR_SVE_SET_VL  50
6354 # define PR_SVE_GET_VL  51
6355 # define PR_SVE_VL_LEN_MASK  0xffff
6356 # define PR_SVE_VL_INHERIT   (1 << 17)
6357 #endif
6358 #ifndef PR_PAC_RESET_KEYS
6359 # define PR_PAC_RESET_KEYS  54
6360 # define PR_PAC_APIAKEY   (1 << 0)
6361 # define PR_PAC_APIBKEY   (1 << 1)
6362 # define PR_PAC_APDAKEY   (1 << 2)
6363 # define PR_PAC_APDBKEY   (1 << 3)
6364 # define PR_PAC_APGAKEY   (1 << 4)
6365 #endif
6366 #ifndef PR_SET_TAGGED_ADDR_CTRL
6367 # define PR_SET_TAGGED_ADDR_CTRL 55
6368 # define PR_GET_TAGGED_ADDR_CTRL 56
6369 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6370 #endif
6371 #ifndef PR_MTE_TCF_SHIFT
6372 # define PR_MTE_TCF_SHIFT       1
6373 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6374 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6375 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6376 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6377 # define PR_MTE_TAG_SHIFT       3
6378 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6379 #endif
6380 #ifndef PR_SET_IO_FLUSHER
6381 # define PR_SET_IO_FLUSHER 57
6382 # define PR_GET_IO_FLUSHER 58
6383 #endif
6384 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6385 # define PR_SET_SYSCALL_USER_DISPATCH 59
6386 #endif
6387 #ifndef PR_SME_SET_VL
6388 # define PR_SME_SET_VL  63
6389 # define PR_SME_GET_VL  64
6390 # define PR_SME_VL_LEN_MASK  0xffff
6391 # define PR_SME_VL_INHERIT   (1 << 17)
6392 #endif
6393 
6394 #include "target_prctl.h"
6395 
6396 static abi_long do_prctl_inval0(CPUArchState *env)
6397 {
6398     return -TARGET_EINVAL;
6399 }
6400 
6401 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6402 {
6403     return -TARGET_EINVAL;
6404 }
6405 
6406 #ifndef do_prctl_get_fp_mode
6407 #define do_prctl_get_fp_mode do_prctl_inval0
6408 #endif
6409 #ifndef do_prctl_set_fp_mode
6410 #define do_prctl_set_fp_mode do_prctl_inval1
6411 #endif
6412 #ifndef do_prctl_sve_get_vl
6413 #define do_prctl_sve_get_vl do_prctl_inval0
6414 #endif
6415 #ifndef do_prctl_sve_set_vl
6416 #define do_prctl_sve_set_vl do_prctl_inval1
6417 #endif
6418 #ifndef do_prctl_reset_keys
6419 #define do_prctl_reset_keys do_prctl_inval1
6420 #endif
6421 #ifndef do_prctl_set_tagged_addr_ctrl
6422 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6423 #endif
6424 #ifndef do_prctl_get_tagged_addr_ctrl
6425 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6426 #endif
6427 #ifndef do_prctl_get_unalign
6428 #define do_prctl_get_unalign do_prctl_inval1
6429 #endif
6430 #ifndef do_prctl_set_unalign
6431 #define do_prctl_set_unalign do_prctl_inval1
6432 #endif
6433 #ifndef do_prctl_sme_get_vl
6434 #define do_prctl_sme_get_vl do_prctl_inval0
6435 #endif
6436 #ifndef do_prctl_sme_set_vl
6437 #define do_prctl_sme_set_vl do_prctl_inval1
6438 #endif
6439 
6440 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6441                          abi_long arg3, abi_long arg4, abi_long arg5)
6442 {
6443     abi_long ret;
6444 
6445     switch (option) {
6446     case PR_GET_PDEATHSIG:
6447         {
6448             int deathsig;
6449             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6450                                   arg3, arg4, arg5));
6451             if (!is_error(ret) &&
6452                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6453                 return -TARGET_EFAULT;
6454             }
6455             return ret;
6456         }
6457     case PR_SET_PDEATHSIG:
6458         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6459                                arg3, arg4, arg5));
6460     case PR_GET_NAME:
6461         {
6462             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6463             if (!name) {
6464                 return -TARGET_EFAULT;
6465             }
6466             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6467                                   arg3, arg4, arg5));
6468             unlock_user(name, arg2, 16);
6469             return ret;
6470         }
6471     case PR_SET_NAME:
6472         {
6473             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6474             if (!name) {
6475                 return -TARGET_EFAULT;
6476             }
6477             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6478                                   arg3, arg4, arg5));
6479             unlock_user(name, arg2, 0);
6480             return ret;
6481         }
6482     case PR_GET_FP_MODE:
6483         return do_prctl_get_fp_mode(env);
6484     case PR_SET_FP_MODE:
6485         return do_prctl_set_fp_mode(env, arg2);
6486     case PR_SVE_GET_VL:
6487         return do_prctl_sve_get_vl(env);
6488     case PR_SVE_SET_VL:
6489         return do_prctl_sve_set_vl(env, arg2);
6490     case PR_SME_GET_VL:
6491         return do_prctl_sme_get_vl(env);
6492     case PR_SME_SET_VL:
6493         return do_prctl_sme_set_vl(env, arg2);
6494     case PR_PAC_RESET_KEYS:
6495         if (arg3 || arg4 || arg5) {
6496             return -TARGET_EINVAL;
6497         }
6498         return do_prctl_reset_keys(env, arg2);
6499     case PR_SET_TAGGED_ADDR_CTRL:
6500         if (arg3 || arg4 || arg5) {
6501             return -TARGET_EINVAL;
6502         }
6503         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6504     case PR_GET_TAGGED_ADDR_CTRL:
6505         if (arg2 || arg3 || arg4 || arg5) {
6506             return -TARGET_EINVAL;
6507         }
6508         return do_prctl_get_tagged_addr_ctrl(env);
6509 
6510     case PR_GET_UNALIGN:
6511         return do_prctl_get_unalign(env, arg2);
6512     case PR_SET_UNALIGN:
6513         return do_prctl_set_unalign(env, arg2);
6514 
6515     case PR_CAP_AMBIENT:
6516     case PR_CAPBSET_READ:
6517     case PR_CAPBSET_DROP:
6518     case PR_GET_DUMPABLE:
6519     case PR_SET_DUMPABLE:
6520     case PR_GET_KEEPCAPS:
6521     case PR_SET_KEEPCAPS:
6522     case PR_GET_SECUREBITS:
6523     case PR_SET_SECUREBITS:
6524     case PR_GET_TIMING:
6525     case PR_SET_TIMING:
6526     case PR_GET_TIMERSLACK:
6527     case PR_SET_TIMERSLACK:
6528     case PR_MCE_KILL:
6529     case PR_MCE_KILL_GET:
6530     case PR_GET_NO_NEW_PRIVS:
6531     case PR_SET_NO_NEW_PRIVS:
6532     case PR_GET_IO_FLUSHER:
6533     case PR_SET_IO_FLUSHER:
6534         /* These prctl options have no pointer arguments, so we can pass them on unchanged. */
6535         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6536 
6537     case PR_GET_CHILD_SUBREAPER:
6538     case PR_SET_CHILD_SUBREAPER:
6539     case PR_GET_SPECULATION_CTRL:
6540     case PR_SET_SPECULATION_CTRL:
6541     case PR_GET_TID_ADDRESS:
6542         /* TODO */
6543         return -TARGET_EINVAL;
6544 
6545     case PR_GET_FPEXC:
6546     case PR_SET_FPEXC:
6547         /* Was used for SPE on PowerPC. */
6548         return -TARGET_EINVAL;
6549 
6550     case PR_GET_ENDIAN:
6551     case PR_SET_ENDIAN:
6552     case PR_GET_FPEMU:
6553     case PR_SET_FPEMU:
6554     case PR_SET_MM:
6555     case PR_GET_SECCOMP:
6556     case PR_SET_SECCOMP:
6557     case PR_SET_SYSCALL_USER_DISPATCH:
6558     case PR_GET_THP_DISABLE:
6559     case PR_SET_THP_DISABLE:
6560     case PR_GET_TSC:
6561     case PR_SET_TSC:
6562         /* Refuse these so the target cannot disable features QEMU itself needs. */
6563         return -TARGET_EINVAL;
6564 
6565     default:
6566         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6567                       option);
6568         return -TARGET_EINVAL;
6569     }
6570 }
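/*
 * Illustrative note (not part of the original source): PR_SET_NAME and
 * PR_GET_NAME operate on a fixed 16-byte thread name (TASK_COMM_LEN),
 * which is why do_prctl() above locks exactly 16 bytes of guest memory
 * for them.  A hedged guest-side sketch:
 *
 *     prctl(PR_SET_NAME, "worker");
 *     char comm[16];
 *     prctl(PR_GET_NAME, comm);
 */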
6571 
6572 #define NEW_STACK_SIZE 0x40000
6573 
6574 
6575 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6576 typedef struct {
6577     CPUArchState *env;
6578     pthread_mutex_t mutex;
6579     pthread_cond_t cond;
6580     pthread_t thread;
6581     uint32_t tid;
6582     abi_ulong child_tidptr;
6583     abi_ulong parent_tidptr;
6584     sigset_t sigmask;
6585 } new_thread_info;
6586 
6587 static void *clone_func(void *arg)
6588 {
6589     new_thread_info *info = arg;
6590     CPUArchState *env;
6591     CPUState *cpu;
6592     TaskState *ts;
6593 
6594     rcu_register_thread();
6595     tcg_register_thread();
6596     env = info->env;
6597     cpu = env_cpu(env);
6598     thread_cpu = cpu;
6599     ts = (TaskState *)cpu->opaque;
6600     info->tid = sys_gettid();
6601     task_settid(ts);
6602     if (info->child_tidptr)
6603         put_user_u32(info->tid, info->child_tidptr);
6604     if (info->parent_tidptr)
6605         put_user_u32(info->tid, info->parent_tidptr);
6606     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6607     /* Enable signals.  */
6608     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6609     /* Signal to the parent that we're ready.  */
6610     pthread_mutex_lock(&info->mutex);
6611     pthread_cond_broadcast(&info->cond);
6612     pthread_mutex_unlock(&info->mutex);
6613     /* Wait until the parent has finished initializing the tls state.  */
6614     pthread_mutex_lock(&clone_lock);
6615     pthread_mutex_unlock(&clone_lock);
6616     cpu_loop(env);
6617     /* never exits */
6618     return NULL;
6619 }
6620 
6621 /* do_fork() must return host values and target errnos (unlike most
6622    other do_*() functions). */
6623 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6624                    abi_ulong parent_tidptr, target_ulong newtls,
6625                    abi_ulong child_tidptr)
6626 {
6627     CPUState *cpu = env_cpu(env);
6628     int ret;
6629     TaskState *ts;
6630     CPUState *new_cpu;
6631     CPUArchState *new_env;
6632     sigset_t sigmask;
6633 
6634     flags &= ~CLONE_IGNORED_FLAGS;
6635 
6636     /* Emulate vfork() with fork() */
6637     if (flags & CLONE_VFORK)
6638         flags &= ~(CLONE_VFORK | CLONE_VM);
6639 
6640     if (flags & CLONE_VM) {
6641         TaskState *parent_ts = (TaskState *)cpu->opaque;
6642         new_thread_info info;
6643         pthread_attr_t attr;
6644 
6645         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6646             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6647             return -TARGET_EINVAL;
6648         }
6649 
6650         ts = g_new0(TaskState, 1);
6651         init_task_state(ts);
6652 
6653         /* Grab a mutex so that thread setup appears atomic.  */
6654         pthread_mutex_lock(&clone_lock);
6655 
6656         /*
6657          * If this is our first additional thread, we need to ensure we
6658          * generate code for parallel execution and flush old translations.
6659          * Do this now so that the copy gets CF_PARALLEL too.
6660          */
6661         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6662             cpu->tcg_cflags |= CF_PARALLEL;
6663             tb_flush(cpu);
6664         }
6665 
6666         /* we create a new CPU instance. */
6667         new_env = cpu_copy(env);
6668         /* Init regs that differ from the parent.  */
6669         cpu_clone_regs_child(new_env, newsp, flags);
6670         cpu_clone_regs_parent(env, flags);
6671         new_cpu = env_cpu(new_env);
6672         new_cpu->opaque = ts;
6673         ts->bprm = parent_ts->bprm;
6674         ts->info = parent_ts->info;
6675         ts->signal_mask = parent_ts->signal_mask;
6676 
6677         if (flags & CLONE_CHILD_CLEARTID) {
6678             ts->child_tidptr = child_tidptr;
6679         }
6680 
6681         if (flags & CLONE_SETTLS) {
6682             cpu_set_tls (new_env, newtls);
6683         }
6684 
6685         memset(&info, 0, sizeof(info));
6686         pthread_mutex_init(&info.mutex, NULL);
6687         pthread_mutex_lock(&info.mutex);
6688         pthread_cond_init(&info.cond, NULL);
6689         info.env = new_env;
6690         if (flags & CLONE_CHILD_SETTID) {
6691             info.child_tidptr = child_tidptr;
6692         }
6693         if (flags & CLONE_PARENT_SETTID) {
6694             info.parent_tidptr = parent_tidptr;
6695         }
6696 
6697         ret = pthread_attr_init(&attr);
6698         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6699         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6700         /* It is not safe to deliver signals until the child has finished
6701            initializing, so temporarily block all signals.  */
6702         sigfillset(&sigmask);
6703         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6704         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6705 
6706         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6707         /* TODO: Free new CPU state if thread creation failed.  */
6708 
6709         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6710         pthread_attr_destroy(&attr);
6711         if (ret == 0) {
6712             /* Wait for the child to initialize.  */
6713             pthread_cond_wait(&info.cond, &info.mutex);
6714             ret = info.tid;
6715         } else {
6716             ret = -1;
6717         }
6718         pthread_mutex_unlock(&info.mutex);
6719         pthread_cond_destroy(&info.cond);
6720         pthread_mutex_destroy(&info.mutex);
6721         pthread_mutex_unlock(&clone_lock);
6722     } else {
6723         /* if no CLONE_VM, we consider it is a fork */
6724         if (flags & CLONE_INVALID_FORK_FLAGS) {
6725             return -TARGET_EINVAL;
6726         }
6727 
6728         /* We can't support custom termination signals */
6729         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6730             return -TARGET_EINVAL;
6731         }
6732 
6733         if (block_signals()) {
6734             return -QEMU_ERESTARTSYS;
6735         }
6736 
6737         fork_start();
6738         ret = fork();
6739         if (ret == 0) {
6740             /* Child Process.  */
6741             cpu_clone_regs_child(env, newsp, flags);
6742             fork_end(1);
6743             /* There is a race condition here.  The parent process could
6744                theoretically read the TID in the child process before the child
6745                tid is set.  This would require using either ptrace
6746                (not implemented) or having *_tidptr point at a shared memory
6747                mapping.  We can't repeat the spinlock hack used above because
6748                the child process gets its own copy of the lock.  */
6749             if (flags & CLONE_CHILD_SETTID)
6750                 put_user_u32(sys_gettid(), child_tidptr);
6751             if (flags & CLONE_PARENT_SETTID)
6752                 put_user_u32(sys_gettid(), parent_tidptr);
6753             ts = (TaskState *)cpu->opaque;
6754             if (flags & CLONE_SETTLS)
6755                 cpu_set_tls (env, newtls);
6756             if (flags & CLONE_CHILD_CLEARTID)
6757                 ts->child_tidptr = child_tidptr;
6758         } else {
6759             cpu_clone_regs_parent(env, flags);
6760             fork_end(0);
6761         }
6762         g_assert(!cpu_in_exclusive_context(cpu));
6763     }
6764     return ret;
6765 }
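/*
 * Illustrative note (not part of the original source): guest thread
 * creation usually arrives here with roughly the flag set that glibc's
 * pthread_create() passes to clone(), i.e.
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which takes the CLONE_VM branch above (one host pthread per guest
 * thread), while a plain fork()/vfork() from the guest takes the fork()
 * branch.
 */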
6766 
6767 /* Warning: doesn't handle Linux-specific flags... */
6768 static int target_to_host_fcntl_cmd(int cmd)
6769 {
6770     int ret;
6771 
6772     switch(cmd) {
6773     case TARGET_F_DUPFD:
6774     case TARGET_F_GETFD:
6775     case TARGET_F_SETFD:
6776     case TARGET_F_GETFL:
6777     case TARGET_F_SETFL:
6778     case TARGET_F_OFD_GETLK:
6779     case TARGET_F_OFD_SETLK:
6780     case TARGET_F_OFD_SETLKW:
6781         ret = cmd;
6782         break;
6783     case TARGET_F_GETLK:
6784         ret = F_GETLK64;
6785         break;
6786     case TARGET_F_SETLK:
6787         ret = F_SETLK64;
6788         break;
6789     case TARGET_F_SETLKW:
6790         ret = F_SETLKW64;
6791         break;
6792     case TARGET_F_GETOWN:
6793         ret = F_GETOWN;
6794         break;
6795     case TARGET_F_SETOWN:
6796         ret = F_SETOWN;
6797         break;
6798     case TARGET_F_GETSIG:
6799         ret = F_GETSIG;
6800         break;
6801     case TARGET_F_SETSIG:
6802         ret = F_SETSIG;
6803         break;
6804 #if TARGET_ABI_BITS == 32
6805     case TARGET_F_GETLK64:
6806         ret = F_GETLK64;
6807         break;
6808     case TARGET_F_SETLK64:
6809         ret = F_SETLK64;
6810         break;
6811     case TARGET_F_SETLKW64:
6812         ret = F_SETLKW64;
6813         break;
6814 #endif
6815     case TARGET_F_SETLEASE:
6816         ret = F_SETLEASE;
6817         break;
6818     case TARGET_F_GETLEASE:
6819         ret = F_GETLEASE;
6820         break;
6821 #ifdef F_DUPFD_CLOEXEC
6822     case TARGET_F_DUPFD_CLOEXEC:
6823         ret = F_DUPFD_CLOEXEC;
6824         break;
6825 #endif
6826     case TARGET_F_NOTIFY:
6827         ret = F_NOTIFY;
6828         break;
6829 #ifdef F_GETOWN_EX
6830     case TARGET_F_GETOWN_EX:
6831         ret = F_GETOWN_EX;
6832         break;
6833 #endif
6834 #ifdef F_SETOWN_EX
6835     case TARGET_F_SETOWN_EX:
6836         ret = F_SETOWN_EX;
6837         break;
6838 #endif
6839 #ifdef F_SETPIPE_SZ
6840     case TARGET_F_SETPIPE_SZ:
6841         ret = F_SETPIPE_SZ;
6842         break;
6843     case TARGET_F_GETPIPE_SZ:
6844         ret = F_GETPIPE_SZ;
6845         break;
6846 #endif
6847 #ifdef F_ADD_SEALS
6848     case TARGET_F_ADD_SEALS:
6849         ret = F_ADD_SEALS;
6850         break;
6851     case TARGET_F_GET_SEALS:
6852         ret = F_GET_SEALS;
6853         break;
6854 #endif
6855     default:
6856         ret = -TARGET_EINVAL;
6857         break;
6858     }
6859 
6860 #if defined(__powerpc64__)
6861     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6862      * which are not supported by the kernel. The glibc fcntl wrapper actually
6863      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6864      * the syscall directly, adjust to what the kernel supports.
6865      */
6866     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6867         ret -= F_GETLK64 - 5;
6868     }
6869 #endif
6870 
6871     return ret;
6872 }
6873 
6874 #define FLOCK_TRANSTBL \
6875     switch (type) { \
6876     TRANSTBL_CONVERT(F_RDLCK); \
6877     TRANSTBL_CONVERT(F_WRLCK); \
6878     TRANSTBL_CONVERT(F_UNLCK); \
6879     }
6880 
6881 static int target_to_host_flock(int type)
6882 {
6883 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6884     FLOCK_TRANSTBL
6885 #undef  TRANSTBL_CONVERT
6886     return -TARGET_EINVAL;
6887 }
6888 
6889 static int host_to_target_flock(int type)
6890 {
6891 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6892     FLOCK_TRANSTBL
6893 #undef  TRANSTBL_CONVERT
6894     /* if we don't know how to convert the value coming
6895      * from the host, we copy it to the target field as-is
6896      */
6897     return type;
6898 }
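/*
 * Illustrative note (not part of the original source): FLOCK_TRANSTBL is
 * an X-macro; each of the two functions above defines TRANSTBL_CONVERT
 * differently before expanding it, so one list of lock types produces
 * both conversion directions.  target_to_host_flock(), for example,
 * expands to roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 */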
6899 
6900 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6901                                             abi_ulong target_flock_addr)
6902 {
6903     struct target_flock *target_fl;
6904     int l_type;
6905 
6906     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6907         return -TARGET_EFAULT;
6908     }
6909 
6910     __get_user(l_type, &target_fl->l_type);
6911     l_type = target_to_host_flock(l_type);
6912     if (l_type < 0) {
6913         return l_type;
6914     }
6915     fl->l_type = l_type;
6916     __get_user(fl->l_whence, &target_fl->l_whence);
6917     __get_user(fl->l_start, &target_fl->l_start);
6918     __get_user(fl->l_len, &target_fl->l_len);
6919     __get_user(fl->l_pid, &target_fl->l_pid);
6920     unlock_user_struct(target_fl, target_flock_addr, 0);
6921     return 0;
6922 }
6923 
6924 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6925                                           const struct flock64 *fl)
6926 {
6927     struct target_flock *target_fl;
6928     short l_type;
6929 
6930     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6931         return -TARGET_EFAULT;
6932     }
6933 
6934     l_type = host_to_target_flock(fl->l_type);
6935     __put_user(l_type, &target_fl->l_type);
6936     __put_user(fl->l_whence, &target_fl->l_whence);
6937     __put_user(fl->l_start, &target_fl->l_start);
6938     __put_user(fl->l_len, &target_fl->l_len);
6939     __put_user(fl->l_pid, &target_fl->l_pid);
6940     unlock_user_struct(target_fl, target_flock_addr, 1);
6941     return 0;
6942 }
6943 
6944 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6945 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6946 
6947 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6948 struct target_oabi_flock64 {
6949     abi_short l_type;
6950     abi_short l_whence;
6951     abi_llong l_start;
6952     abi_llong l_len;
6953     abi_int   l_pid;
6954 } QEMU_PACKED;
6955 
6956 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6957                                                    abi_ulong target_flock_addr)
6958 {
6959     struct target_oabi_flock64 *target_fl;
6960     int l_type;
6961 
6962     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6963         return -TARGET_EFAULT;
6964     }
6965 
6966     __get_user(l_type, &target_fl->l_type);
6967     l_type = target_to_host_flock(l_type);
6968     if (l_type < 0) {
6969         return l_type;
6970     }
6971     fl->l_type = l_type;
6972     __get_user(fl->l_whence, &target_fl->l_whence);
6973     __get_user(fl->l_start, &target_fl->l_start);
6974     __get_user(fl->l_len, &target_fl->l_len);
6975     __get_user(fl->l_pid, &target_fl->l_pid);
6976     unlock_user_struct(target_fl, target_flock_addr, 0);
6977     return 0;
6978 }
6979 
6980 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6981                                                  const struct flock64 *fl)
6982 {
6983     struct target_oabi_flock64 *target_fl;
6984     short l_type;
6985 
6986     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6987         return -TARGET_EFAULT;
6988     }
6989 
6990     l_type = host_to_target_flock(fl->l_type);
6991     __put_user(l_type, &target_fl->l_type);
6992     __put_user(fl->l_whence, &target_fl->l_whence);
6993     __put_user(fl->l_start, &target_fl->l_start);
6994     __put_user(fl->l_len, &target_fl->l_len);
6995     __put_user(fl->l_pid, &target_fl->l_pid);
6996     unlock_user_struct(target_fl, target_flock_addr, 1);
6997     return 0;
6998 }
6999 #endif
7000 
7001 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7002                                               abi_ulong target_flock_addr)
7003 {
7004     struct target_flock64 *target_fl;
7005     int l_type;
7006 
7007     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7008         return -TARGET_EFAULT;
7009     }
7010 
7011     __get_user(l_type, &target_fl->l_type);
7012     l_type = target_to_host_flock(l_type);
7013     if (l_type < 0) {
7014         return l_type;
7015     }
7016     fl->l_type = l_type;
7017     __get_user(fl->l_whence, &target_fl->l_whence);
7018     __get_user(fl->l_start, &target_fl->l_start);
7019     __get_user(fl->l_len, &target_fl->l_len);
7020     __get_user(fl->l_pid, &target_fl->l_pid);
7021     unlock_user_struct(target_fl, target_flock_addr, 0);
7022     return 0;
7023 }
7024 
7025 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7026                                             const struct flock64 *fl)
7027 {
7028     struct target_flock64 *target_fl;
7029     short l_type;
7030 
7031     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7032         return -TARGET_EFAULT;
7033     }
7034 
7035     l_type = host_to_target_flock(fl->l_type);
7036     __put_user(l_type, &target_fl->l_type);
7037     __put_user(fl->l_whence, &target_fl->l_whence);
7038     __put_user(fl->l_start, &target_fl->l_start);
7039     __put_user(fl->l_len, &target_fl->l_len);
7040     __put_user(fl->l_pid, &target_fl->l_pid);
7041     unlock_user_struct(target_fl, target_flock_addr, 1);
7042     return 0;
7043 }
7044 
7045 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7046 {
7047     struct flock64 fl64;
7048 #ifdef F_GETOWN_EX
7049     struct f_owner_ex fox;
7050     struct target_f_owner_ex *target_fox;
7051 #endif
7052     abi_long ret;
7053     int host_cmd = target_to_host_fcntl_cmd(cmd);
7054 
7055     if (host_cmd == -TARGET_EINVAL)
7056         return host_cmd;
7057 
7058     switch(cmd) {
7059     case TARGET_F_GETLK:
7060         ret = copy_from_user_flock(&fl64, arg);
7061         if (ret) {
7062             return ret;
7063         }
7064         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7065         if (ret == 0) {
7066             ret = copy_to_user_flock(arg, &fl64);
7067         }
7068         break;
7069 
7070     case TARGET_F_SETLK:
7071     case TARGET_F_SETLKW:
7072         ret = copy_from_user_flock(&fl64, arg);
7073         if (ret) {
7074             return ret;
7075         }
7076         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7077         break;
7078 
7079     case TARGET_F_GETLK64:
7080     case TARGET_F_OFD_GETLK:
7081         ret = copy_from_user_flock64(&fl64, arg);
7082         if (ret) {
7083             return ret;
7084         }
7085         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7086         if (ret == 0) {
7087             ret = copy_to_user_flock64(arg, &fl64);
7088         }
7089         break;
7090     case TARGET_F_SETLK64:
7091     case TARGET_F_SETLKW64:
7092     case TARGET_F_OFD_SETLK:
7093     case TARGET_F_OFD_SETLKW:
7094         ret = copy_from_user_flock64(&fl64, arg);
7095         if (ret) {
7096             return ret;
7097         }
7098         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7099         break;
7100 
7101     case TARGET_F_GETFL:
7102         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7103         if (ret >= 0) {
7104             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7105         }
7106         break;
7107 
7108     case TARGET_F_SETFL:
7109         ret = get_errno(safe_fcntl(fd, host_cmd,
7110                                    target_to_host_bitmask(arg,
7111                                                           fcntl_flags_tbl)));
7112         break;
7113 
7114 #ifdef F_GETOWN_EX
7115     case TARGET_F_GETOWN_EX:
7116         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7117         if (ret >= 0) {
7118             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7119                 return -TARGET_EFAULT;
7120             target_fox->type = tswap32(fox.type);
7121             target_fox->pid = tswap32(fox.pid);
7122             unlock_user_struct(target_fox, arg, 1);
7123         }
7124         break;
7125 #endif
7126 
7127 #ifdef F_SETOWN_EX
7128     case TARGET_F_SETOWN_EX:
7129         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7130             return -TARGET_EFAULT;
7131         fox.type = tswap32(target_fox->type);
7132         fox.pid = tswap32(target_fox->pid);
7133         unlock_user_struct(target_fox, arg, 0);
7134         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7135         break;
7136 #endif
7137 
7138     case TARGET_F_SETSIG:
7139         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7140         break;
7141 
7142     case TARGET_F_GETSIG:
7143         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7144         break;
7145 
7146     case TARGET_F_SETOWN:
7147     case TARGET_F_GETOWN:
7148     case TARGET_F_SETLEASE:
7149     case TARGET_F_GETLEASE:
7150     case TARGET_F_SETPIPE_SZ:
7151     case TARGET_F_GETPIPE_SZ:
7152     case TARGET_F_ADD_SEALS:
7153     case TARGET_F_GET_SEALS:
7154         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7155         break;
7156 
7157     default:
7158         ret = get_errno(safe_fcntl(fd, cmd, arg));
7159         break;
7160     }
7161     return ret;
7162 }
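/*
 * Illustrative note (not part of the original source): a guest record
 * lock request such as
 *
 *     struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *                         .l_start = 0, .l_len = 0 };
 *     fcntl(fd, F_SETLKW, &fl);
 *
 * is converted by copy_from_user_flock() above, issued with the 64-bit
 * host command chosen by target_to_host_fcntl_cmd(), and, for the
 * F_GETLK variants, copied back with copy_to_user_flock().
 */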
7163 
7164 #ifdef USE_UID16
7165 
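/*
 * Illustrative note (not part of the original source): the legacy 16-bit
 * UID/GID syscalls cannot represent IDs above 65535, so out-of-range
 * values are reported as 65534 (the kernel's overflow UID/GID), while
 * the 16-bit value -1 must keep its "unchanged" meaning when widened:
 *
 *     high2lowuid(100000) == 65534
 *     low2highuid(0xffff) == -1
 */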
7166 static inline int high2lowuid(int uid)
7167 {
7168     if (uid > 65535)
7169         return 65534;
7170     else
7171         return uid;
7172 }
7173 
7174 static inline int high2lowgid(int gid)
7175 {
7176     if (gid > 65535)
7177         return 65534;
7178     else
7179         return gid;
7180 }
7181 
7182 static inline int low2highuid(int uid)
7183 {
7184     if ((int16_t)uid == -1)
7185         return -1;
7186     else
7187         return uid;
7188 }
7189 
7190 static inline int low2highgid(int gid)
7191 {
7192     if ((int16_t)gid == -1)
7193         return -1;
7194     else
7195         return gid;
7196 }
7197 static inline int tswapid(int id)
7198 {
7199     return tswap16(id);
7200 }
7201 
7202 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7203 
7204 #else /* !USE_UID16 */
7205 static inline int high2lowuid(int uid)
7206 {
7207     return uid;
7208 }
7209 static inline int high2lowgid(int gid)
7210 {
7211     return gid;
7212 }
7213 static inline int low2highuid(int uid)
7214 {
7215     return uid;
7216 }
7217 static inline int low2highgid(int gid)
7218 {
7219     return gid;
7220 }
7221 static inline int tswapid(int id)
7222 {
7223     return tswap32(id);
7224 }
7225 
7226 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7227 
7228 #endif /* USE_UID16 */
7229 
7230 /* We must do direct syscalls for setting UID/GID, because we want to
7231  * implement the Linux system call semantics of "change only for this thread",
7232  * not the libc/POSIX semantics of "change for all threads in process".
7233  * (See http://ewontfix.com/17/ for more details.)
7234  * We use the 32-bit version of the syscalls if present; if it is not
7235  * then either the host architecture supports 32-bit UIDs natively with
7236  * the standard syscall, or the 16-bit UID is the best we can do.
7237  */
7238 #ifdef __NR_setuid32
7239 #define __NR_sys_setuid __NR_setuid32
7240 #else
7241 #define __NR_sys_setuid __NR_setuid
7242 #endif
7243 #ifdef __NR_setgid32
7244 #define __NR_sys_setgid __NR_setgid32
7245 #else
7246 #define __NR_sys_setgid __NR_setgid
7247 #endif
7248 #ifdef __NR_setresuid32
7249 #define __NR_sys_setresuid __NR_setresuid32
7250 #else
7251 #define __NR_sys_setresuid __NR_setresuid
7252 #endif
7253 #ifdef __NR_setresgid32
7254 #define __NR_sys_setresgid __NR_setresgid32
7255 #else
7256 #define __NR_sys_setresgid __NR_setresgid
7257 #endif
7258 
7259 _syscall1(int, sys_setuid, uid_t, uid)
7260 _syscall1(int, sys_setgid, gid_t, gid)
7261 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7262 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7263 
7264 void syscall_init(void)
7265 {
7266     IOCTLEntry *ie;
7267     const argtype *arg_type;
7268     int size;
7269 
7270     thunk_init(STRUCT_MAX);
7271 
7272 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7273 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7274 #include "syscall_types.h"
7275 #undef STRUCT
7276 #undef STRUCT_SPECIAL
7277 
7278     /* we patch the ioctl size if necessary. We rely on the fact that
7279        no ioctl has all the bits set to '1' in the size field */
7280     ie = ioctl_entries;
7281     while (ie->target_cmd != 0) {
7282         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7283             TARGET_IOC_SIZEMASK) {
7284             arg_type = ie->arg_type;
7285             if (arg_type[0] != TYPE_PTR) {
7286                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7287                         ie->target_cmd);
7288                 exit(1);
7289             }
7290             arg_type++;
7291             size = thunk_type_size(arg_type, 0);
7292             ie->target_cmd = (ie->target_cmd &
7293                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7294                 (size << TARGET_IOC_SIZESHIFT);
7295         }
7296 
7297         /* automatic consistency check if same arch */
7298 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7299     (defined(__x86_64__) && defined(TARGET_X86_64))
7300         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7301             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7302                     ie->name, ie->target_cmd, ie->host_cmd);
7303         }
7304 #endif
7305         ie++;
7306     }
7307 }
7308 
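/*
 * Illustrative note (not part of the original source): on some 32-bit
 * guest ABIs (for example ARM EABI and MIPS O32) a 64-bit syscall
 * argument must be passed in an aligned register pair, so the ABI
 * inserts a padding argument before it.  regpairs_aligned() reports
 * this, and the helpers below shift their argument slots accordingly
 * before reassembling the 64-bit offset with target_offset64().
 * Roughly:
 *
 *     truncate64(path, len) -> guest args: path, pad, <len register pair>
 */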
7309 #ifdef TARGET_NR_truncate64
7310 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7311                                          abi_long arg2,
7312                                          abi_long arg3,
7313                                          abi_long arg4)
7314 {
7315     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7316         arg2 = arg3;
7317         arg3 = arg4;
7318     }
7319     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7320 }
7321 #endif
7322 
7323 #ifdef TARGET_NR_ftruncate64
7324 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7325                                           abi_long arg2,
7326                                           abi_long arg3,
7327                                           abi_long arg4)
7328 {
7329     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7330         arg2 = arg3;
7331         arg3 = arg4;
7332     }
7333     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7334 }
7335 #endif
7336 
7337 #if defined(TARGET_NR_timer_settime) || \
7338     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7339 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7340                                                  abi_ulong target_addr)
7341 {
7342     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7343                                 offsetof(struct target_itimerspec,
7344                                          it_interval)) ||
7345         target_to_host_timespec(&host_its->it_value, target_addr +
7346                                 offsetof(struct target_itimerspec,
7347                                          it_value))) {
7348         return -TARGET_EFAULT;
7349     }
7350 
7351     return 0;
7352 }
7353 #endif
7354 
7355 #if defined(TARGET_NR_timer_settime64) || \
7356     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7357 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7358                                                    abi_ulong target_addr)
7359 {
7360     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7361                                   offsetof(struct target__kernel_itimerspec,
7362                                            it_interval)) ||
7363         target_to_host_timespec64(&host_its->it_value, target_addr +
7364                                   offsetof(struct target__kernel_itimerspec,
7365                                            it_value))) {
7366         return -TARGET_EFAULT;
7367     }
7368 
7369     return 0;
7370 }
7371 #endif
7372 
7373 #if ((defined(TARGET_NR_timerfd_gettime) || \
7374       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7375       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7376 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7377                                                  struct itimerspec *host_its)
7378 {
7379     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7380                                                        it_interval),
7381                                 &host_its->it_interval) ||
7382         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7383                                                        it_value),
7384                                 &host_its->it_value)) {
7385         return -TARGET_EFAULT;
7386     }
7387     return 0;
7388 }
7389 #endif
7390 
7391 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7392       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7393       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7394 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7395                                                    struct itimerspec *host_its)
7396 {
7397     if (host_to_target_timespec64(target_addr +
7398                                   offsetof(struct target__kernel_itimerspec,
7399                                            it_interval),
7400                                   &host_its->it_interval) ||
7401         host_to_target_timespec64(target_addr +
7402                                   offsetof(struct target__kernel_itimerspec,
7403                                            it_value),
7404                                   &host_its->it_value)) {
7405         return -TARGET_EFAULT;
7406     }
7407     return 0;
7408 }
7409 #endif
7410 
7411 #if defined(TARGET_NR_adjtimex) || \
7412     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7413 static inline abi_long target_to_host_timex(struct timex *host_tx,
7414                                             abi_long target_addr)
7415 {
7416     struct target_timex *target_tx;
7417 
7418     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7419         return -TARGET_EFAULT;
7420     }
7421 
7422     __get_user(host_tx->modes, &target_tx->modes);
7423     __get_user(host_tx->offset, &target_tx->offset);
7424     __get_user(host_tx->freq, &target_tx->freq);
7425     __get_user(host_tx->maxerror, &target_tx->maxerror);
7426     __get_user(host_tx->esterror, &target_tx->esterror);
7427     __get_user(host_tx->status, &target_tx->status);
7428     __get_user(host_tx->constant, &target_tx->constant);
7429     __get_user(host_tx->precision, &target_tx->precision);
7430     __get_user(host_tx->tolerance, &target_tx->tolerance);
7431     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7432     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7433     __get_user(host_tx->tick, &target_tx->tick);
7434     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7435     __get_user(host_tx->jitter, &target_tx->jitter);
7436     __get_user(host_tx->shift, &target_tx->shift);
7437     __get_user(host_tx->stabil, &target_tx->stabil);
7438     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7439     __get_user(host_tx->calcnt, &target_tx->calcnt);
7440     __get_user(host_tx->errcnt, &target_tx->errcnt);
7441     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7442     __get_user(host_tx->tai, &target_tx->tai);
7443 
7444     unlock_user_struct(target_tx, target_addr, 0);
7445     return 0;
7446 }
7447 
7448 static inline abi_long host_to_target_timex(abi_long target_addr,
7449                                             struct timex *host_tx)
7450 {
7451     struct target_timex *target_tx;
7452 
7453     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7454         return -TARGET_EFAULT;
7455     }
7456 
7457     __put_user(host_tx->modes, &target_tx->modes);
7458     __put_user(host_tx->offset, &target_tx->offset);
7459     __put_user(host_tx->freq, &target_tx->freq);
7460     __put_user(host_tx->maxerror, &target_tx->maxerror);
7461     __put_user(host_tx->esterror, &target_tx->esterror);
7462     __put_user(host_tx->status, &target_tx->status);
7463     __put_user(host_tx->constant, &target_tx->constant);
7464     __put_user(host_tx->precision, &target_tx->precision);
7465     __put_user(host_tx->tolerance, &target_tx->tolerance);
7466     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7467     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7468     __put_user(host_tx->tick, &target_tx->tick);
7469     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7470     __put_user(host_tx->jitter, &target_tx->jitter);
7471     __put_user(host_tx->shift, &target_tx->shift);
7472     __put_user(host_tx->stabil, &target_tx->stabil);
7473     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7474     __put_user(host_tx->calcnt, &target_tx->calcnt);
7475     __put_user(host_tx->errcnt, &target_tx->errcnt);
7476     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7477     __put_user(host_tx->tai, &target_tx->tai);
7478 
7479     unlock_user_struct(target_tx, target_addr, 1);
7480     return 0;
7481 }
7482 #endif
7483 
7484 
7485 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7486 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7487                                               abi_long target_addr)
7488 {
7489     struct target__kernel_timex *target_tx;
7490 
7491     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7492                                  offsetof(struct target__kernel_timex,
7493                                           time))) {
7494         return -TARGET_EFAULT;
7495     }
7496 
7497     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7498         return -TARGET_EFAULT;
7499     }
7500 
7501     __get_user(host_tx->modes, &target_tx->modes);
7502     __get_user(host_tx->offset, &target_tx->offset);
7503     __get_user(host_tx->freq, &target_tx->freq);
7504     __get_user(host_tx->maxerror, &target_tx->maxerror);
7505     __get_user(host_tx->esterror, &target_tx->esterror);
7506     __get_user(host_tx->status, &target_tx->status);
7507     __get_user(host_tx->constant, &target_tx->constant);
7508     __get_user(host_tx->precision, &target_tx->precision);
7509     __get_user(host_tx->tolerance, &target_tx->tolerance);
7510     __get_user(host_tx->tick, &target_tx->tick);
7511     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7512     __get_user(host_tx->jitter, &target_tx->jitter);
7513     __get_user(host_tx->shift, &target_tx->shift);
7514     __get_user(host_tx->stabil, &target_tx->stabil);
7515     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7516     __get_user(host_tx->calcnt, &target_tx->calcnt);
7517     __get_user(host_tx->errcnt, &target_tx->errcnt);
7518     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7519     __get_user(host_tx->tai, &target_tx->tai);
7520 
7521     unlock_user_struct(target_tx, target_addr, 0);
7522     return 0;
7523 }
7524 
7525 static inline abi_long host_to_target_timex64(abi_long target_addr,
7526                                               struct timex *host_tx)
7527 {
7528     struct target__kernel_timex *target_tx;
7529 
7530     if (copy_to_user_timeval64(target_addr +
7531                                offsetof(struct target__kernel_timex, time),
7532                                &host_tx->time)) {
7533         return -TARGET_EFAULT;
7534     }
7535 
7536     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7537         return -TARGET_EFAULT;
7538     }
7539 
7540     __put_user(host_tx->modes, &target_tx->modes);
7541     __put_user(host_tx->offset, &target_tx->offset);
7542     __put_user(host_tx->freq, &target_tx->freq);
7543     __put_user(host_tx->maxerror, &target_tx->maxerror);
7544     __put_user(host_tx->esterror, &target_tx->esterror);
7545     __put_user(host_tx->status, &target_tx->status);
7546     __put_user(host_tx->constant, &target_tx->constant);
7547     __put_user(host_tx->precision, &target_tx->precision);
7548     __put_user(host_tx->tolerance, &target_tx->tolerance);
7549     __put_user(host_tx->tick, &target_tx->tick);
7550     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7551     __put_user(host_tx->jitter, &target_tx->jitter);
7552     __put_user(host_tx->shift, &target_tx->shift);
7553     __put_user(host_tx->stabil, &target_tx->stabil);
7554     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7555     __put_user(host_tx->calcnt, &target_tx->calcnt);
7556     __put_user(host_tx->errcnt, &target_tx->errcnt);
7557     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7558     __put_user(host_tx->tai, &target_tx->tai);
7559 
7560     unlock_user_struct(target_tx, target_addr, 1);
7561     return 0;
7562 }
7563 #endif
7564 
7565 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7566 #define sigev_notify_thread_id _sigev_un._tid
7567 #endif
7568 
7569 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7570                                                abi_ulong target_addr)
7571 {
7572     struct target_sigevent *target_sevp;
7573 
7574     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7575         return -TARGET_EFAULT;
7576     }
7577 
7578     /* This union is awkward on 64 bit systems because it has a 32 bit
7579      * integer and a pointer in it; we follow the conversion approach
7580      * used for handling sigval types in signal.c so the guest should get
7581      * the correct value back even if we did a 64 bit byteswap and it's
7582      * using the 32 bit integer.
7583      */
7584     host_sevp->sigev_value.sival_ptr =
7585         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7586     host_sevp->sigev_signo =
7587         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7588     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7589     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7590 
7591     unlock_user_struct(target_sevp, target_addr, 1);
7592     return 0;
7593 }
7594 
7595 #if defined(TARGET_NR_mlockall)
7596 static inline int target_to_host_mlockall_arg(int arg)
7597 {
7598     int result = 0;
7599 
7600     if (arg & TARGET_MCL_CURRENT) {
7601         result |= MCL_CURRENT;
7602     }
7603     if (arg & TARGET_MCL_FUTURE) {
7604         result |= MCL_FUTURE;
7605     }
7606 #ifdef MCL_ONFAULT
7607     if (arg & TARGET_MCL_ONFAULT) {
7608         result |= MCL_ONFAULT;
7609     }
7610 #endif
7611 
7612     return result;
7613 }
7614 #endif
7615 
7616 static inline int target_to_host_msync_arg(abi_long arg)
7617 {
7618     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7619            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7620            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7621            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7622 }
7623 
7624 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7625      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7626      defined(TARGET_NR_newfstatat))
7627 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7628                                              abi_ulong target_addr,
7629                                              struct stat *host_st)
7630 {
7631 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7632     if (cpu_env->eabi) {
7633         struct target_eabi_stat64 *target_st;
7634 
7635         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7636             return -TARGET_EFAULT;
7637         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7638         __put_user(host_st->st_dev, &target_st->st_dev);
7639         __put_user(host_st->st_ino, &target_st->st_ino);
7640 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7641         __put_user(host_st->st_ino, &target_st->__st_ino);
7642 #endif
7643         __put_user(host_st->st_mode, &target_st->st_mode);
7644         __put_user(host_st->st_nlink, &target_st->st_nlink);
7645         __put_user(host_st->st_uid, &target_st->st_uid);
7646         __put_user(host_st->st_gid, &target_st->st_gid);
7647         __put_user(host_st->st_rdev, &target_st->st_rdev);
7648         __put_user(host_st->st_size, &target_st->st_size);
7649         __put_user(host_st->st_blksize, &target_st->st_blksize);
7650         __put_user(host_st->st_blocks, &target_st->st_blocks);
7651         __put_user(host_st->st_atime, &target_st->target_st_atime);
7652         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7653         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7654 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7655         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7656         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7657         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7658 #endif
7659         unlock_user_struct(target_st, target_addr, 1);
7660     } else
7661 #endif
7662     {
7663 #if defined(TARGET_HAS_STRUCT_STAT64)
7664         struct target_stat64 *target_st;
7665 #else
7666         struct target_stat *target_st;
7667 #endif
7668 
7669         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7670             return -TARGET_EFAULT;
7671         memset(target_st, 0, sizeof(*target_st));
7672         __put_user(host_st->st_dev, &target_st->st_dev);
7673         __put_user(host_st->st_ino, &target_st->st_ino);
7674 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7675         __put_user(host_st->st_ino, &target_st->__st_ino);
7676 #endif
7677         __put_user(host_st->st_mode, &target_st->st_mode);
7678         __put_user(host_st->st_nlink, &target_st->st_nlink);
7679         __put_user(host_st->st_uid, &target_st->st_uid);
7680         __put_user(host_st->st_gid, &target_st->st_gid);
7681         __put_user(host_st->st_rdev, &target_st->st_rdev);
7682         /* XXX: better use of kernel struct */
7683         __put_user(host_st->st_size, &target_st->st_size);
7684         __put_user(host_st->st_blksize, &target_st->st_blksize);
7685         __put_user(host_st->st_blocks, &target_st->st_blocks);
7686         __put_user(host_st->st_atime, &target_st->target_st_atime);
7687         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7688         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7689 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7690         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7691         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7692         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7693 #endif
7694         unlock_user_struct(target_st, target_addr, 1);
7695     }
7696 
7697     return 0;
7698 }
7699 #endif
7700 
7701 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7702 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7703                                             abi_ulong target_addr)
7704 {
7705     struct target_statx *target_stx;
7706 
7707     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7708         return -TARGET_EFAULT;
7709     }
7710     memset(target_stx, 0, sizeof(*target_stx));
7711 
7712     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7713     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7714     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7715     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7716     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7717     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7718     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7719     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7720     __put_user(host_stx->stx_size, &target_stx->stx_size);
7721     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7722     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7723     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7724     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7725     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7726     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7727     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7728     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7729     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7730     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7731     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7732     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7733     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7734     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7735 
7736     unlock_user_struct(target_stx, target_addr, 1);
7737 
7738     return 0;
7739 }
7740 #endif
7741 
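/*
 * Dispatch to the appropriate host futex syscall.  On 64-bit hosts only
 * __NR_futex exists and it already takes a 64-bit timespec.  On 32-bit
 * hosts, prefer __NR_futex_time64 when the host struct timespec has a
 * 64-bit tv_sec, otherwise fall back to the old __NR_futex.
 */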
7742 static int do_sys_futex(int *uaddr, int op, int val,
7743                          const struct timespec *timeout, int *uaddr2,
7744                          int val3)
7745 {
7746 #if HOST_LONG_BITS == 64
7747 #if defined(__NR_futex)
7748     /* The host time_t is always 64 bits wide, so no _time64 variant is defined. */
7749     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7750 
7751 #endif
7752 #else /* HOST_LONG_BITS == 64 */
7753 #if defined(__NR_futex_time64)
7754     if (sizeof(timeout->tv_sec) == 8) {
7755         /* _time64 function on 32bit arch */
7756         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7757     }
7758 #endif
7759 #if defined(__NR_futex)
7760     /* old function on 32bit arch */
7761     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7762 #endif
7763 #endif /* HOST_LONG_BITS == 64 */
7764     g_assert_not_reached();
7765 }
7766 
7767 static int do_safe_futex(int *uaddr, int op, int val,
7768                          const struct timespec *timeout, int *uaddr2,
7769                          int val3)
7770 {
7771 #if HOST_LONG_BITS == 64
7772 #if defined(__NR_futex)
7773     /* always a 64-bit time_t, it doesn't define _time64 version  */
7774     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7775 #endif
7776 #else /* HOST_LONG_BITS == 64 */
7777 #if defined(__NR_futex_time64)
7778     if (sizeof(timeout->tv_sec) == 8) {
7779         /* _time64 function on 32bit arch */
7780         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7781                                            val3));
7782     }
7783 #endif
7784 #if defined(__NR_futex)
7785     /* old function on 32bit arch */
7786     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7787 #endif
7788 #endif /* HOST_LONG_BITS == 64 */
7789     return -TARGET_ENOSYS;
7790 }
7791 
7792 /* ??? Using host futex calls even when target atomic operations
7793    are not really atomic probably breaks things.  However, implementing
7794    futexes locally would make futexes shared between multiple processes
7795    tricky; such shared futexes are probably useless anyway, because guest
7796    atomic operations would not work either.  */
7797 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
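/*
 * Note that VAL and VAL3 are byte-swapped into guest order for operations
 * that compare them against the futex word (e.g. FUTEX_WAIT and
 * FUTEX_CMP_REQUEUE), because the host kernel reads that word directly
 * from guest memory, which is in guest byte order.
 */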
7798 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7799                     int op, int val, target_ulong timeout,
7800                     target_ulong uaddr2, int val3)
7801 {
7802     struct timespec ts, *pts = NULL;
7803     void *haddr2 = NULL;
7804     int base_op;
7805 
7806     /* We assume FUTEX_* constants are the same on both host and target. */
7807 #ifdef FUTEX_CMD_MASK
7808     base_op = op & FUTEX_CMD_MASK;
7809 #else
7810     base_op = op;
7811 #endif
7812     switch (base_op) {
7813     case FUTEX_WAIT:
7814     case FUTEX_WAIT_BITSET:
7815         val = tswap32(val);
7816         break;
7817     case FUTEX_WAIT_REQUEUE_PI:
7818         val = tswap32(val);
7819         haddr2 = g2h(cpu, uaddr2);
7820         break;
7821     case FUTEX_LOCK_PI:
7822     case FUTEX_LOCK_PI2:
7823         break;
7824     case FUTEX_WAKE:
7825     case FUTEX_WAKE_BITSET:
7826     case FUTEX_TRYLOCK_PI:
7827     case FUTEX_UNLOCK_PI:
7828         timeout = 0;
7829         break;
7830     case FUTEX_FD:
7831         val = target_to_host_signal(val);
7832         timeout = 0;
7833         break;
7834     case FUTEX_CMP_REQUEUE:
7835     case FUTEX_CMP_REQUEUE_PI:
7836         val3 = tswap32(val3);
7837         /* fall through */
7838     case FUTEX_REQUEUE:
7839     case FUTEX_WAKE_OP:
7840         /*
7841          * For these, the 4th argument is not TIMEOUT, but VAL2.
7842          * But the prototype of do_safe_futex takes a pointer, so
7843          * insert casts to satisfy the compiler.  We do not need
7844          * to tswap VAL2 since it's not compared to guest memory.
7845          */
7846         pts = (struct timespec *)(uintptr_t)timeout;
7847         timeout = 0;
7848         haddr2 = g2h(cpu, uaddr2);
7849         break;
7850     default:
7851         return -TARGET_ENOSYS;
7852     }
7853     if (timeout) {
7854         pts = &ts;
7855         if (time64
7856             ? target_to_host_timespec64(pts, timeout)
7857             : target_to_host_timespec(pts, timeout)) {
7858             return -TARGET_EFAULT;
7859         }
7860     }
7861     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7862 }
7863 #endif
7864 
7865 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7866 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7867                                      abi_long handle, abi_long mount_id,
7868                                      abi_long flags)
7869 {
7870     struct file_handle *target_fh;
7871     struct file_handle *fh;
7872     int mid = 0;
7873     abi_long ret;
7874     char *name;
7875     unsigned int size, total_size;
7876 
7877     if (get_user_s32(size, handle)) {
7878         return -TARGET_EFAULT;
7879     }
7880 
7881     name = lock_user_string(pathname);
7882     if (!name) {
7883         return -TARGET_EFAULT;
7884     }
7885 
7886     total_size = sizeof(struct file_handle) + size;
7887     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7888     if (!target_fh) {
7889         unlock_user(name, pathname, 0);
7890         return -TARGET_EFAULT;
7891     }
7892 
7893     fh = g_malloc0(total_size);
7894     fh->handle_bytes = size;
7895 
7896     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7897     unlock_user(name, pathname, 0);
7898 
7899     /* man name_to_handle_at(2):
7900      * Other than the use of the handle_bytes field, the caller should treat
7901      * the file_handle structure as an opaque data type
7902      */
7903 
7904     memcpy(target_fh, fh, total_size);
7905     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7906     target_fh->handle_type = tswap32(fh->handle_type);
7907     g_free(fh);
7908     unlock_user(target_fh, handle, total_size);
7909 
7910     if (put_user_s32(mid, mount_id)) {
7911         return -TARGET_EFAULT;
7912     }
7913 
7914     return ret;
7915 
7916 }
7917 #endif
7918 
7919 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7920 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7921                                      abi_long flags)
7922 {
7923     struct file_handle *target_fh;
7924     struct file_handle *fh;
7925     unsigned int size, total_size;
7926     abi_long ret;
7927 
7928     if (get_user_s32(size, handle)) {
7929         return -TARGET_EFAULT;
7930     }
7931 
7932     total_size = sizeof(struct file_handle) + size;
7933     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7934     if (!target_fh) {
7935         return -TARGET_EFAULT;
7936     }
7937 
7938     fh = g_memdup(target_fh, total_size);
7939     fh->handle_bytes = size;
7940     fh->handle_type = tswap32(target_fh->handle_type);
7941 
7942     ret = get_errno(open_by_handle_at(mount_fd, fh,
7943                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7944 
7945     g_free(fh);
7946 
7947     unlock_user(target_fh, handle, total_size);
7948 
7949     return ret;
7950 }
7951 #endif
7952 
7953 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7954 
7955 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7956 {
7957     int host_flags;
7958     target_sigset_t *target_mask;
7959     sigset_t host_mask;
7960     abi_long ret;
7961 
7962     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7963         return -TARGET_EINVAL;
7964     }
7965     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7966         return -TARGET_EFAULT;
7967     }
7968 
7969     target_to_host_sigset(&host_mask, target_mask);
7970 
7971     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7972 
7973     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7974     if (ret >= 0) {
7975         fd_trans_register(ret, &target_signalfd_trans);
7976     }
7977 
7978     unlock_user_struct(target_mask, mask, 0);
7979 
7980     return ret;
7981 }
7982 #endif
7983 
7984 /* Map host to target signal numbers for the wait family of syscalls.
7985    Assume all other status bits are the same.  */
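/* As an illustration of the layout assumed here: bits 0-6 hold the
   terminating signal, bit 7 is the core-dump flag, and bits 8-15 hold
   either the exit code or, for stopped children, the stop signal; only
   the signal numbers need remapping.  */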
7986 int host_to_target_waitstatus(int status)
7987 {
7988     if (WIFSIGNALED(status)) {
7989         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7990     }
7991     if (WIFSTOPPED(status)) {
7992         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7993                | (status & 0xff);
7994     }
7995     return status;
7996 }
7997 
7998 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7999 {
8000     CPUState *cpu = env_cpu(cpu_env);
8001     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8002     int i;
8003 
8004     for (i = 0; i < bprm->argc; i++) {
8005         size_t len = strlen(bprm->argv[i]) + 1;
8006 
8007         if (write(fd, bprm->argv[i], len) != len) {
8008             return -1;
8009         }
8010     }
8011 
8012     return 0;
8013 }
8014 
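/*
 * Synthesize the guest's view of /proc/self/maps from QEMU's own mappings,
 * keeping only ranges that lie inside the guest address space and printing
 * them with guest addresses in the usual
 * "start-end perms offset dev inode  path" format.
 */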
8015 static int open_self_maps(CPUArchState *cpu_env, int fd)
8016 {
8017     CPUState *cpu = env_cpu(cpu_env);
8018     TaskState *ts = cpu->opaque;
8019     GSList *map_info = read_self_maps();
8020     GSList *s;
8021     int count;
8022 
8023     for (s = map_info; s; s = g_slist_next(s)) {
8024         MapInfo *e = (MapInfo *) s->data;
8025 
8026         if (h2g_valid(e->start)) {
8027             unsigned long min = e->start;
8028             unsigned long max = e->end;
8029             int flags = page_get_flags(h2g(min));
8030             const char *path;
8031 
8032             max = h2g_valid(max - 1) ?
8033                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8034 
8035             if (page_check_range(h2g(min), max - min, flags) == -1) {
8036                 continue;
8037             }
8038 
8039 #ifdef TARGET_HPPA
8040             if (h2g(max) == ts->info->stack_limit) {
8041 #else
8042             if (h2g(min) == ts->info->stack_limit) {
8043 #endif
8044                 path = "[stack]";
8045             } else {
8046                 path = e->path;
8047             }
8048 
8049             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8050                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8051                             h2g(min), h2g(max - 1) + 1,
8052                             (flags & PAGE_READ) ? 'r' : '-',
8053                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8054                             (flags & PAGE_EXEC) ? 'x' : '-',
8055                             e->is_priv ? 'p' : 's',
8056                             (uint64_t) e->offset, e->dev, e->inode);
8057             if (path) {
8058                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8059             } else {
8060                 dprintf(fd, "\n");
8061             }
8062         }
8063     }
8064 
8065     free_self_maps(map_info);
8066 
8067 #ifdef TARGET_VSYSCALL_PAGE
8068     /*
8069      * We only support execution from the vsyscall page.
8070      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8071      */
8072     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8073                     " --xp 00000000 00:00 0",
8074                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8075     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8076 #endif
8077 
8078     return 0;
8079 }
8080 
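/*
 * Synthesize /proc/self/stat.  Only a handful of fields carry real values:
 * pid (field 1), comm (2), state (3), ppid (4), starttime (22) and
 * startstack (28); every other field is reported as 0.
 */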
8081 static int open_self_stat(CPUArchState *cpu_env, int fd)
8082 {
8083     CPUState *cpu = env_cpu(cpu_env);
8084     TaskState *ts = cpu->opaque;
8085     g_autoptr(GString) buf = g_string_new(NULL);
8086     int i;
8087 
8088     for (i = 0; i < 44; i++) {
8089         if (i == 0) {
8090             /* pid */
8091             g_string_printf(buf, FMT_pid " ", getpid());
8092         } else if (i == 1) {
8093             /* app name */
8094             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8095             bin = bin ? bin + 1 : ts->bprm->argv[0];
8096             g_string_printf(buf, "(%.15s) ", bin);
8097         } else if (i == 2) {
8098             /* task state */
8099             g_string_assign(buf, "R "); /* we are running right now */
8100         } else if (i == 3) {
8101             /* ppid */
8102             g_string_printf(buf, FMT_pid " ", getppid());
8103         } else if (i == 21) {
8104             /* starttime */
8105             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8106         } else if (i == 27) {
8107             /* stack bottom */
8108             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8109         } else {
8110             /* for the rest, there is MasterCard */
8111             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8112         }
8113 
8114         if (write(fd, buf->str, buf->len) != buf->len) {
8115             return -1;
8116         }
8117     }
8118 
8119     return 0;
8120 }
8121 
8122 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8123 {
8124     CPUState *cpu = env_cpu(cpu_env);
8125     TaskState *ts = cpu->opaque;
8126     abi_ulong auxv = ts->info->saved_auxv;
8127     abi_ulong len = ts->info->auxv_len;
8128     char *ptr;
8129 
8130     /*
8131      * The auxiliary vector is stored on the target process stack.
8132      * Read the whole auxv vector and copy it to the file.
8133      */
8134     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8135     if (ptr != NULL) {
8136         while (len > 0) {
8137             ssize_t r;
8138             r = write(fd, ptr, len);
8139             if (r <= 0) {
8140                 break;
8141             }
8142             len -= r;
8143             ptr += r;
8144         }
8145         lseek(fd, 0, SEEK_SET);
8146         unlock_user(ptr, auxv, len);
8147     }
8148 
8149     return 0;
8150 }
8151 
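/*
 * Return non-zero if FILENAME names ENTRY inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 * For example, is_proc_myself("/proc/self/maps", "maps") is true.
 */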
8152 static int is_proc_myself(const char *filename, const char *entry)
8153 {
8154     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8155         filename += strlen("/proc/");
8156         if (!strncmp(filename, "self/", strlen("self/"))) {
8157             filename += strlen("self/");
8158         } else if (*filename >= '1' && *filename <= '9') {
8159             char myself[80];
8160             snprintf(myself, sizeof(myself), "%d/", getpid());
8161             if (!strncmp(filename, myself, strlen(myself))) {
8162                 filename += strlen(myself);
8163             } else {
8164                 return 0;
8165             }
8166         } else {
8167             return 0;
8168         }
8169         if (!strcmp(filename, entry)) {
8170             return 1;
8171         }
8172     }
8173     return 0;
8174 }
8175 
8176 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8177                       const char *fmt, int code)
8178 {
8179     if (logfile) {
8180         CPUState *cs = env_cpu(env);
8181 
8182         fprintf(logfile, fmt, code);
8183         fprintf(logfile, "Failing executable: %s\n", exec_path);
8184         cpu_dump_state(cs, logfile, 0);
8185         open_self_maps(env, fileno(logfile));
8186     }
8187 }
8188 
8189 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8190 {
8191     /* dump to console */
8192     excp_dump_file(stderr, env, fmt, code);
8193 
8194     /* dump to log file */
8195     if (qemu_log_separate()) {
8196         FILE *logfile = qemu_log_trylock();
8197 
8198         excp_dump_file(logfile, env, fmt, code);
8199         qemu_log_unlock(logfile);
8200     }
8201 }
8202 
8203 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8204     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8205 static int is_proc(const char *filename, const char *entry)
8206 {
8207     return strcmp(filename, entry) == 0;
8208 }
8209 #endif
8210 
8211 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8212 static int open_net_route(CPUArchState *cpu_env, int fd)
8213 {
8214     FILE *fp;
8215     char *line = NULL;
8216     size_t len = 0;
8217     ssize_t read;
8218 
8219     fp = fopen("/proc/net/route", "r");
8220     if (fp == NULL) {
8221         return -1;
8222     }
8223 
8224     /* read header */
8225 
8226     read = getline(&line, &len, fp);
8227     dprintf(fd, "%s", line);
8228 
8229     /* read routes */
8230 
8231     while ((read = getline(&line, &len, fp)) != -1) {
8232         char iface[16];
8233         uint32_t dest, gw, mask;
8234         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8235         int fields;
8236 
8237         fields = sscanf(line,
8238                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8239                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8240                         &mask, &mtu, &window, &irtt);
8241         if (fields != 11) {
8242             continue;
8243         }
8244         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8245                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8246                 metric, tswap32(mask), mtu, window, irtt);
8247     }
8248 
8249     free(line);
8250     fclose(fp);
8251 
8252     return 0;
8253 }
8254 #endif
8255 
8256 #if defined(TARGET_SPARC)
8257 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8258 {
8259     dprintf(fd, "type\t\t: sun4u\n");
8260     return 0;
8261 }
8262 #endif
8263 
8264 #if defined(TARGET_HPPA)
8265 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8266 {
8267     int i, num_cpus;
8268 
8269     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8270     for (i = 0; i < num_cpus; i++) {
8271         dprintf(fd, "processor\t: %d\n", i);
8272         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8273         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8274         dprintf(fd, "capabilities\t: os32\n");
8275         dprintf(fd, "model\t\t: 9000/778/B160L - "
8276                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8277     }
8278     return 0;
8279 }
8280 #endif
8281 
8282 #if defined(TARGET_M68K)
8283 static int open_hardware(CPUArchState *cpu_env, int fd)
8284 {
8285     dprintf(fd, "Model:\t\tqemu-m68k\n");
8286     return 0;
8287 }
8288 #endif
8289 
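/*
 * Open a file on behalf of the guest.  A few /proc paths whose contents
 * would otherwise describe the host (maps, stat, auxv, cmdline and some
 * per-architecture entries) are emulated: their synthetic contents are
 * written to a memfd (or, failing that, an unlinked temporary file) and
 * that descriptor is returned to the guest instead.
 */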
8290 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8291 {
8292     struct fake_open {
8293         const char *filename;
8294         int (*fill)(CPUArchState *cpu_env, int fd);
8295         int (*cmp)(const char *s1, const char *s2);
8296     };
8297     const struct fake_open *fake_open;
8298     static const struct fake_open fakes[] = {
8299         { "maps", open_self_maps, is_proc_myself },
8300         { "stat", open_self_stat, is_proc_myself },
8301         { "auxv", open_self_auxv, is_proc_myself },
8302         { "cmdline", open_self_cmdline, is_proc_myself },
8303 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8304         { "/proc/net/route", open_net_route, is_proc },
8305 #endif
8306 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8307         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8308 #endif
8309 #if defined(TARGET_M68K)
8310         { "/proc/hardware", open_hardware, is_proc },
8311 #endif
8312         { NULL, NULL, NULL }
8313     };
8314 
8315     if (is_proc_myself(pathname, "exe")) {
8316         return safe_openat(dirfd, exec_path, flags, mode);
8317     }
8318 
8319     for (fake_open = fakes; fake_open->filename; fake_open++) {
8320         if (fake_open->cmp(pathname, fake_open->filename)) {
8321             break;
8322         }
8323     }
8324 
8325     if (fake_open->filename) {
8326         const char *tmpdir;
8327         char filename[PATH_MAX];
8328         int fd, r;
8329 
8330         fd = memfd_create("qemu-open", 0);
8331         if (fd < 0) {
8332             if (errno != ENOSYS) {
8333                 return fd;
8334             }
8335             /* memfd_create() unavailable: fall back to an unlinked temporary file */
8336             tmpdir = getenv("TMPDIR");
8337             if (!tmpdir)
8338                 tmpdir = "/tmp";
8339             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8340             fd = mkstemp(filename);
8341             if (fd < 0) {
8342                 return fd;
8343             }
8344             unlink(filename);
8345         }
8346 
8347         if ((r = fake_open->fill(cpu_env, fd))) {
8348             int e = errno;
8349             close(fd);
8350             errno = e;
8351             return r;
8352         }
8353         lseek(fd, 0, SEEK_SET);
8354 
8355         return fd;
8356     }
8357 
8358     return safe_openat(dirfd, path(pathname), flags, mode);
8359 }
8360 
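/*
 * execve()/execveat() emulation.  The guest argv and envp are
 * NULL-terminated arrays of guest pointers: walk them once to count the
 * entries, then lock each string into host memory before making the host
 * syscall.
 */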
8361 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8362                        abi_long pathname, abi_long guest_argp,
8363                        abi_long guest_envp, int flags)
8364 {
8365     int ret;
8366     char **argp, **envp;
8367     int argc, envc;
8368     abi_ulong gp;
8369     abi_ulong addr;
8370     char **q;
8371     void *p;
8372 
8373     argc = 0;
8374 
8375     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8376         if (get_user_ual(addr, gp)) {
8377             return -TARGET_EFAULT;
8378         }
8379         if (!addr) {
8380             break;
8381         }
8382         argc++;
8383     }
8384     envc = 0;
8385     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8386         if (get_user_ual(addr, gp)) {
8387             return -TARGET_EFAULT;
8388         }
8389         if (!addr) {
8390             break;
8391         }
8392         envc++;
8393     }
8394 
8395     argp = g_new0(char *, argc + 1);
8396     envp = g_new0(char *, envc + 1);
8397 
8398     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8399         if (get_user_ual(addr, gp)) {
8400             goto execve_efault;
8401         }
8402         if (!addr) {
8403             break;
8404         }
8405         *q = lock_user_string(addr);
8406         if (!*q) {
8407             goto execve_efault;
8408         }
8409     }
8410     *q = NULL;
8411 
8412     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8413         if (get_user_ual(addr, gp)) {
8414             goto execve_efault;
8415         }
8416         if (!addr) {
8417             break;
8418         }
8419         *q = lock_user_string(addr);
8420         if (!*q) {
8421             goto execve_efault;
8422         }
8423     }
8424     *q = NULL;
8425 
8426     /*
8427      * Although execve() is not an interruptible syscall it is
8428      * a special case where we must use the safe_syscall wrapper:
8429      * if we allow a signal to happen before we make the host
8430      * syscall then we will 'lose' it, because at the point of
8431      * execve the process leaves QEMU's control. So we use the
8432      * safe syscall wrapper to ensure that we either take the
8433      * signal as a guest signal, or else it does not happen
8434      * before the execve completes and makes it the other
8435      * program's problem.
8436      */
8437     p = lock_user_string(pathname);
8438     if (!p) {
8439         goto execve_efault;
8440     }
8441 
8442     if (is_proc_myself(p, "exe")) {
8443         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8444     } else {
8445         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8446     }
8447 
8448     unlock_user(p, pathname, 0);
8449 
8450     goto execve_end;
8451 
8452 execve_efault:
8453     ret = -TARGET_EFAULT;
8454 
8455 execve_end:
8456     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8457         if (get_user_ual(addr, gp) || !addr) {
8458             break;
8459         }
8460         unlock_user(*q, addr, 0);
8461     }
8462     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8463         if (get_user_ual(addr, gp) || !addr) {
8464             break;
8465         }
8466         unlock_user(*q, addr, 0);
8467     }
8468 
8469     g_free(argp);
8470     g_free(envp);
8471     return ret;
8472 }
8473 
8474 #define TIMER_MAGIC 0x0caf0000
8475 #define TIMER_MAGIC_MASK 0xffff0000
8476 
8477 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
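/* For example, a guest-visible ID of (TIMER_MAGIC | 2) maps to host timer
 * index 2, while any value without the TIMER_MAGIC tag is rejected with
 * -TARGET_EINVAL. */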
8478 static target_timer_t get_timer_id(abi_long arg)
8479 {
8480     target_timer_t timerid = arg;
8481 
8482     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8483         return -TARGET_EINVAL;
8484     }
8485 
8486     timerid &= 0xffff;
8487 
8488     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8489         return -TARGET_EINVAL;
8490     }
8491 
8492     return timerid;
8493 }
8494 
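/*
 * Convert a guest CPU affinity mask into a host mask of unsigned longs,
 * copying it bit by bit so that differing word sizes (e.g. a 32-bit
 * abi_ulong guest on a 64-bit host) and byte orders are handled correctly.
 */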
8495 static int target_to_host_cpu_mask(unsigned long *host_mask,
8496                                    size_t host_size,
8497                                    abi_ulong target_addr,
8498                                    size_t target_size)
8499 {
8500     unsigned target_bits = sizeof(abi_ulong) * 8;
8501     unsigned host_bits = sizeof(*host_mask) * 8;
8502     abi_ulong *target_mask;
8503     unsigned i, j;
8504 
8505     assert(host_size >= target_size);
8506 
8507     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8508     if (!target_mask) {
8509         return -TARGET_EFAULT;
8510     }
8511     memset(host_mask, 0, host_size);
8512 
8513     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8514         unsigned bit = i * target_bits;
8515         abi_ulong val;
8516 
8517         __get_user(val, &target_mask[i]);
8518         for (j = 0; j < target_bits; j++, bit++) {
8519             if (val & (1UL << j)) {
8520                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8521             }
8522         }
8523     }
8524 
8525     unlock_user(target_mask, target_addr, 0);
8526     return 0;
8527 }
8528 
8529 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8530                                    size_t host_size,
8531                                    abi_ulong target_addr,
8532                                    size_t target_size)
8533 {
8534     unsigned target_bits = sizeof(abi_ulong) * 8;
8535     unsigned host_bits = sizeof(*host_mask) * 8;
8536     abi_ulong *target_mask;
8537     unsigned i, j;
8538 
8539     assert(host_size >= target_size);
8540 
8541     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8542     if (!target_mask) {
8543         return -TARGET_EFAULT;
8544     }
8545 
8546     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8547         unsigned bit = i * target_bits;
8548         abi_ulong val = 0;
8549 
8550         for (j = 0; j < target_bits; j++, bit++) {
8551             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8552                 val |= 1UL << j;
8553             }
8554         }
8555         __put_user(val, &target_mask[i]);
8556     }
8557 
8558     unlock_user(target_mask, target_addr, target_size);
8559     return 0;
8560 }
8561 
8562 #ifdef TARGET_NR_getdents
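/*
 * getdents(2) emulation: read host directory entries into a bounce buffer,
 * then re-pack them one record at a time into the guest's layout, since
 * host and target dirent records can differ in size and alignment.
 */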
8563 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8564 {
8565     g_autofree void *hdirp = NULL;
8566     void *tdirp;
8567     int hlen, hoff, toff;
8568     int hreclen, treclen;
8569     off64_t prev_diroff = 0;
8570 
8571     hdirp = g_try_malloc(count);
8572     if (!hdirp) {
8573         return -TARGET_ENOMEM;
8574     }
8575 
8576 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8577     hlen = sys_getdents(dirfd, hdirp, count);
8578 #else
8579     hlen = sys_getdents64(dirfd, hdirp, count);
8580 #endif
8581 
8582     hlen = get_errno(hlen);
8583     if (is_error(hlen)) {
8584         return hlen;
8585     }
8586 
8587     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8588     if (!tdirp) {
8589         return -TARGET_EFAULT;
8590     }
8591 
8592     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8593 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8594         struct linux_dirent *hde = hdirp + hoff;
8595 #else
8596         struct linux_dirent64 *hde = hdirp + hoff;
8597 #endif
8598         struct target_dirent *tde = tdirp + toff;
8599         int namelen;
8600         uint8_t type;
8601 
8602         namelen = strlen(hde->d_name);
8603         hreclen = hde->d_reclen;
8604         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8605         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8606 
8607         if (toff + treclen > count) {
8608             /*
8609              * If the host struct is smaller than the target struct, or
8610              * requires less alignment and thus packs into less space,
8611              * then the host can return more entries than we can pass
8612              * on to the guest.
8613              */
8614             if (toff == 0) {
8615                 toff = -TARGET_EINVAL; /* result buffer is too small */
8616                 break;
8617             }
8618             /*
8619              * Return what we have, resetting the file pointer to the
8620              * location of the first record not returned.
8621              */
8622             lseek64(dirfd, prev_diroff, SEEK_SET);
8623             break;
8624         }
8625 
8626         prev_diroff = hde->d_off;
8627         tde->d_ino = tswapal(hde->d_ino);
8628         tde->d_off = tswapal(hde->d_off);
8629         tde->d_reclen = tswap16(treclen);
8630         memcpy(tde->d_name, hde->d_name, namelen + 1);
8631 
8632         /*
8633          * The getdents type is in what was formerly a padding byte at the
8634          * end of the structure.
8635          */
8636 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8637         type = *((uint8_t *)hde + hreclen - 1);
8638 #else
8639         type = hde->d_type;
8640 #endif
8641         *((uint8_t *)tde + treclen - 1) = type;
8642     }
8643 
8644     unlock_user(tdirp, arg2, toff);
8645     return toff;
8646 }
8647 #endif /* TARGET_NR_getdents */
8648 
8649 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8650 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8651 {
8652     g_autofree void *hdirp = NULL;
8653     void *tdirp;
8654     int hlen, hoff, toff;
8655     int hreclen, treclen;
8656     off64_t prev_diroff = 0;
8657 
8658     hdirp = g_try_malloc(count);
8659     if (!hdirp) {
8660         return -TARGET_ENOMEM;
8661     }
8662 
8663     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8664     if (is_error(hlen)) {
8665         return hlen;
8666     }
8667 
8668     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8669     if (!tdirp) {
8670         return -TARGET_EFAULT;
8671     }
8672 
8673     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8674         struct linux_dirent64 *hde = hdirp + hoff;
8675         struct target_dirent64 *tde = tdirp + toff;
8676         int namelen;
8677 
8678         namelen = strlen(hde->d_name) + 1;
8679         hreclen = hde->d_reclen;
8680         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8681         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8682 
8683         if (toff + treclen > count) {
8684             /*
8685              * If the host struct is smaller than the target struct, or
8686              * requires less alignment and thus packs into less space,
8687              * then the host can return more entries than we can pass
8688              * on to the guest.
8689              */
8690             if (toff == 0) {
8691                 toff = -TARGET_EINVAL; /* result buffer is too small */
8692                 break;
8693             }
8694             /*
8695              * Return what we have, resetting the file pointer to the
8696              * location of the first record not returned.
8697              */
8698             lseek64(dirfd, prev_diroff, SEEK_SET);
8699             break;
8700         }
8701 
8702         prev_diroff = hde->d_off;
8703         tde->d_ino = tswap64(hde->d_ino);
8704         tde->d_off = tswap64(hde->d_off);
8705         tde->d_reclen = tswap16(treclen);
8706         tde->d_type = hde->d_type;
8707         memcpy(tde->d_name, hde->d_name, namelen);
8708     }
8709 
8710     unlock_user(tdirp, arg2, toff);
8711     return toff;
8712 }
8713 #endif /* TARGET_NR_getdents64 */
8714 
8715 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8716 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8717 #endif
8718 
8719 /* This is an internal helper for do_syscall, kept separate so that there
8720  * is a single return point at which actions such as logging of syscall
8721  * results can be performed.
8722  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8723  */
8724 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8725                             abi_long arg2, abi_long arg3, abi_long arg4,
8726                             abi_long arg5, abi_long arg6, abi_long arg7,
8727                             abi_long arg8)
8728 {
8729     CPUState *cpu = env_cpu(cpu_env);
8730     abi_long ret;
8731 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8732     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8733     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8734     || defined(TARGET_NR_statx)
8735     struct stat st;
8736 #endif
8737 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8738     || defined(TARGET_NR_fstatfs)
8739     struct statfs stfs;
8740 #endif
8741     void *p;
8742 
8743     switch(num) {
8744     case TARGET_NR_exit:
8745         /* In old applications this may be used to implement _exit(2).
8746            However, in threaded applications it is used for thread termination,
8747            and _exit_group is used for application termination.
8748            Do thread termination if we have more than one thread.  */
8749 
8750         if (block_signals()) {
8751             return -QEMU_ERESTARTSYS;
8752         }
8753 
8754         pthread_mutex_lock(&clone_lock);
8755 
8756         if (CPU_NEXT(first_cpu)) {
8757             TaskState *ts = cpu->opaque;
8758 
8759             if (ts->child_tidptr) {
8760                 put_user_u32(0, ts->child_tidptr);
8761                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8762                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8763             }
8764 
8765             object_unparent(OBJECT(cpu));
8766             object_unref(OBJECT(cpu));
8767             /*
8768              * At this point the CPU should be unrealized and removed
8769              * from cpu lists. We can clean-up the rest of the thread
8770              * data without the lock held.
8771              */
8772 
8773             pthread_mutex_unlock(&clone_lock);
8774 
8775             thread_cpu = NULL;
8776             g_free(ts);
8777             rcu_unregister_thread();
8778             pthread_exit(NULL);
8779         }
8780 
8781         pthread_mutex_unlock(&clone_lock);
8782         preexit_cleanup(cpu_env, arg1);
8783         _exit(arg1);
8784         return 0; /* avoid warning */
8785     case TARGET_NR_read:
8786         if (arg2 == 0 && arg3 == 0) {
8787             return get_errno(safe_read(arg1, 0, 0));
8788         } else {
8789             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8790                 return -TARGET_EFAULT;
8791             ret = get_errno(safe_read(arg1, p, arg3));
8792             if (ret >= 0 &&
8793                 fd_trans_host_to_target_data(arg1)) {
8794                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8795             }
8796             unlock_user(p, arg2, ret);
8797         }
8798         return ret;
8799     case TARGET_NR_write:
8800         if (arg2 == 0 && arg3 == 0) {
8801             return get_errno(safe_write(arg1, 0, 0));
8802         }
8803         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8804             return -TARGET_EFAULT;
8805         if (fd_trans_target_to_host_data(arg1)) {
8806             void *copy = g_malloc(arg3);
8807             memcpy(copy, p, arg3);
8808             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8809             if (ret >= 0) {
8810                 ret = get_errno(safe_write(arg1, copy, ret));
8811             }
8812             g_free(copy);
8813         } else {
8814             ret = get_errno(safe_write(arg1, p, arg3));
8815         }
8816         unlock_user(p, arg2, 0);
8817         return ret;
8818 
8819 #ifdef TARGET_NR_open
8820     case TARGET_NR_open:
8821         if (!(p = lock_user_string(arg1)))
8822             return -TARGET_EFAULT;
8823         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8824                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8825                                   arg3));
8826         fd_trans_unregister(ret);
8827         unlock_user(p, arg1, 0);
8828         return ret;
8829 #endif
8830     case TARGET_NR_openat:
8831         if (!(p = lock_user_string(arg2)))
8832             return -TARGET_EFAULT;
8833         ret = get_errno(do_openat(cpu_env, arg1, p,
8834                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8835                                   arg4));
8836         fd_trans_unregister(ret);
8837         unlock_user(p, arg2, 0);
8838         return ret;
8839 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8840     case TARGET_NR_name_to_handle_at:
8841         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8842         return ret;
8843 #endif
8844 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8845     case TARGET_NR_open_by_handle_at:
8846         ret = do_open_by_handle_at(arg1, arg2, arg3);
8847         fd_trans_unregister(ret);
8848         return ret;
8849 #endif
8850 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8851     case TARGET_NR_pidfd_open:
8852         return get_errno(pidfd_open(arg1, arg2));
8853 #endif
8854 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8855     case TARGET_NR_pidfd_send_signal:
8856         {
8857             siginfo_t uinfo, *puinfo;
8858 
8859             if (arg3) {
8860                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8861                 if (!p) {
8862                     return -TARGET_EFAULT;
8863                  }
8864                  target_to_host_siginfo(&uinfo, p);
8865                  unlock_user(p, arg3, 0);
8866                  puinfo = &uinfo;
8867             } else {
8868                  puinfo = NULL;
8869             }
8870             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8871                                               puinfo, arg4));
8872         }
8873         return ret;
8874 #endif
8875 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8876     case TARGET_NR_pidfd_getfd:
8877         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8878 #endif
8879     case TARGET_NR_close:
8880         fd_trans_unregister(arg1);
8881         return get_errno(close(arg1));
8882 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8883     case TARGET_NR_close_range:
8884         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8885         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8886             abi_long fd, maxfd;
8887             maxfd = MIN(arg2, target_fd_max);
8888             for (fd = arg1; fd < maxfd; fd++) {
8889                 fd_trans_unregister(fd);
8890             }
8891         }
8892         return ret;
8893 #endif
8894 
8895     case TARGET_NR_brk:
8896         return do_brk(arg1);
8897 #ifdef TARGET_NR_fork
8898     case TARGET_NR_fork:
8899         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8900 #endif
8901 #ifdef TARGET_NR_waitpid
8902     case TARGET_NR_waitpid:
8903         {
8904             int status;
8905             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8906             if (!is_error(ret) && arg2 && ret
8907                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8908                 return -TARGET_EFAULT;
8909         }
8910         return ret;
8911 #endif
8912 #ifdef TARGET_NR_waitid
8913     case TARGET_NR_waitid:
8914         {
8915             siginfo_t info;
8916             info.si_pid = 0;
8917             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8918             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8919                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8920                     return -TARGET_EFAULT;
8921                 host_to_target_siginfo(p, &info);
8922                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8923             }
8924         }
8925         return ret;
8926 #endif
8927 #ifdef TARGET_NR_creat /* not on alpha */
8928     case TARGET_NR_creat:
8929         if (!(p = lock_user_string(arg1)))
8930             return -TARGET_EFAULT;
8931         ret = get_errno(creat(p, arg2));
8932         fd_trans_unregister(ret);
8933         unlock_user(p, arg1, 0);
8934         return ret;
8935 #endif
8936 #ifdef TARGET_NR_link
8937     case TARGET_NR_link:
8938         {
8939             void * p2;
8940             p = lock_user_string(arg1);
8941             p2 = lock_user_string(arg2);
8942             if (!p || !p2)
8943                 ret = -TARGET_EFAULT;
8944             else
8945                 ret = get_errno(link(p, p2));
8946             unlock_user(p2, arg2, 0);
8947             unlock_user(p, arg1, 0);
8948         }
8949         return ret;
8950 #endif
8951 #if defined(TARGET_NR_linkat)
8952     case TARGET_NR_linkat:
8953         {
8954             void * p2 = NULL;
8955             if (!arg2 || !arg4)
8956                 return -TARGET_EFAULT;
8957             p  = lock_user_string(arg2);
8958             p2 = lock_user_string(arg4);
8959             if (!p || !p2)
8960                 ret = -TARGET_EFAULT;
8961             else
8962                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8963             unlock_user(p, arg2, 0);
8964             unlock_user(p2, arg4, 0);
8965         }
8966         return ret;
8967 #endif
8968 #ifdef TARGET_NR_unlink
8969     case TARGET_NR_unlink:
8970         if (!(p = lock_user_string(arg1)))
8971             return -TARGET_EFAULT;
8972         ret = get_errno(unlink(p));
8973         unlock_user(p, arg1, 0);
8974         return ret;
8975 #endif
8976 #if defined(TARGET_NR_unlinkat)
8977     case TARGET_NR_unlinkat:
8978         if (!(p = lock_user_string(arg2)))
8979             return -TARGET_EFAULT;
8980         ret = get_errno(unlinkat(arg1, p, arg3));
8981         unlock_user(p, arg2, 0);
8982         return ret;
8983 #endif
8984     case TARGET_NR_execveat:
8985         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8986     case TARGET_NR_execve:
8987         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8988     case TARGET_NR_chdir:
8989         if (!(p = lock_user_string(arg1)))
8990             return -TARGET_EFAULT;
8991         ret = get_errno(chdir(p));
8992         unlock_user(p, arg1, 0);
8993         return ret;
8994 #ifdef TARGET_NR_time
8995     case TARGET_NR_time:
8996         {
8997             time_t host_time;
8998             ret = get_errno(time(&host_time));
8999             if (!is_error(ret)
9000                 && arg1
9001                 && put_user_sal(host_time, arg1))
9002                 return -TARGET_EFAULT;
9003         }
9004         return ret;
9005 #endif
9006 #ifdef TARGET_NR_mknod
9007     case TARGET_NR_mknod:
9008         if (!(p = lock_user_string(arg1)))
9009             return -TARGET_EFAULT;
9010         ret = get_errno(mknod(p, arg2, arg3));
9011         unlock_user(p, arg1, 0);
9012         return ret;
9013 #endif
9014 #if defined(TARGET_NR_mknodat)
9015     case TARGET_NR_mknodat:
9016         if (!(p = lock_user_string(arg2)))
9017             return -TARGET_EFAULT;
9018         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9019         unlock_user(p, arg2, 0);
9020         return ret;
9021 #endif
9022 #ifdef TARGET_NR_chmod
9023     case TARGET_NR_chmod:
9024         if (!(p = lock_user_string(arg1)))
9025             return -TARGET_EFAULT;
9026         ret = get_errno(chmod(p, arg2));
9027         unlock_user(p, arg1, 0);
9028         return ret;
9029 #endif
9030 #ifdef TARGET_NR_lseek
9031     case TARGET_NR_lseek:
9032         return get_errno(lseek(arg1, arg2, arg3));
9033 #endif
9034 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9035     /* Alpha specific */
9036     case TARGET_NR_getxpid:
9037         cpu_env->ir[IR_A4] = getppid();
9038         return get_errno(getpid());
9039 #endif
9040 #ifdef TARGET_NR_getpid
9041     case TARGET_NR_getpid:
9042         return get_errno(getpid());
9043 #endif
9044     case TARGET_NR_mount:
9045         {
9046             /* need to look at the data field */
9047             void *p2, *p3;
9048 
9049             if (arg1) {
9050                 p = lock_user_string(arg1);
9051                 if (!p) {
9052                     return -TARGET_EFAULT;
9053                 }
9054             } else {
9055                 p = NULL;
9056             }
9057 
9058             p2 = lock_user_string(arg2);
9059             if (!p2) {
9060                 if (arg1) {
9061                     unlock_user(p, arg1, 0);
9062                 }
9063                 return -TARGET_EFAULT;
9064             }
9065 
9066             if (arg3) {
9067                 p3 = lock_user_string(arg3);
9068                 if (!p3) {
9069                     if (arg1) {
9070                         unlock_user(p, arg1, 0);
9071                     }
9072                     unlock_user(p2, arg2, 0);
9073                     return -TARGET_EFAULT;
9074                 }
9075             } else {
9076                 p3 = NULL;
9077             }
9078 
9079             /* FIXME - arg5 should be locked, but it isn't clear how to
9080              * do that since it's not guaranteed to be a NULL-terminated
9081              * string.
9082              */
9083             if (!arg5) {
9084                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9085             } else {
9086                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9087             }
9088             ret = get_errno(ret);
9089 
9090             if (arg1) {
9091                 unlock_user(p, arg1, 0);
9092             }
9093             unlock_user(p2, arg2, 0);
9094             if (arg3) {
9095                 unlock_user(p3, arg3, 0);
9096             }
9097         }
9098         return ret;
9099 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9100 #if defined(TARGET_NR_umount)
9101     case TARGET_NR_umount:
9102 #endif
9103 #if defined(TARGET_NR_oldumount)
9104     case TARGET_NR_oldumount:
9105 #endif
9106         if (!(p = lock_user_string(arg1)))
9107             return -TARGET_EFAULT;
9108         ret = get_errno(umount(p));
9109         unlock_user(p, arg1, 0);
9110         return ret;
9111 #endif
9112 #ifdef TARGET_NR_stime /* not on alpha */
9113     case TARGET_NR_stime:
9114         {
9115             struct timespec ts;
9116             ts.tv_nsec = 0;
9117             if (get_user_sal(ts.tv_sec, arg1)) {
9118                 return -TARGET_EFAULT;
9119             }
9120             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9121         }
9122 #endif
9123 #ifdef TARGET_NR_alarm /* not on alpha */
9124     case TARGET_NR_alarm:
9125         return alarm(arg1);
9126 #endif
9127 #ifdef TARGET_NR_pause /* not on alpha */
9128     case TARGET_NR_pause:
9129         if (!block_signals()) {
9130             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9131         }
9132         return -TARGET_EINTR;
9133 #endif
9134 #ifdef TARGET_NR_utime
9135     case TARGET_NR_utime:
9136         {
9137             struct utimbuf tbuf, *host_tbuf;
9138             struct target_utimbuf *target_tbuf;
9139             if (arg2) {
9140                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9141                     return -TARGET_EFAULT;
9142                 tbuf.actime = tswapal(target_tbuf->actime);
9143                 tbuf.modtime = tswapal(target_tbuf->modtime);
9144                 unlock_user_struct(target_tbuf, arg2, 0);
9145                 host_tbuf = &tbuf;
9146             } else {
9147                 host_tbuf = NULL;
9148             }
9149             if (!(p = lock_user_string(arg1)))
9150                 return -TARGET_EFAULT;
9151             ret = get_errno(utime(p, host_tbuf));
9152             unlock_user(p, arg1, 0);
9153         }
9154         return ret;
9155 #endif
9156 #ifdef TARGET_NR_utimes
9157     case TARGET_NR_utimes:
9158         {
9159             struct timeval *tvp, tv[2];
9160             if (arg2) {
9161                 if (copy_from_user_timeval(&tv[0], arg2)
9162                     || copy_from_user_timeval(&tv[1],
9163                                               arg2 + sizeof(struct target_timeval)))
9164                     return -TARGET_EFAULT;
9165                 tvp = tv;
9166             } else {
9167                 tvp = NULL;
9168             }
9169             if (!(p = lock_user_string(arg1)))
9170                 return -TARGET_EFAULT;
9171             ret = get_errno(utimes(p, tvp));
9172             unlock_user(p, arg1, 0);
9173         }
9174         return ret;
9175 #endif
9176 #if defined(TARGET_NR_futimesat)
9177     case TARGET_NR_futimesat:
9178         {
9179             struct timeval *tvp, tv[2];
9180             if (arg3) {
9181                 if (copy_from_user_timeval(&tv[0], arg3)
9182                     || copy_from_user_timeval(&tv[1],
9183                                               arg3 + sizeof(struct target_timeval)))
9184                     return -TARGET_EFAULT;
9185                 tvp = tv;
9186             } else {
9187                 tvp = NULL;
9188             }
9189             if (!(p = lock_user_string(arg2))) {
9190                 return -TARGET_EFAULT;
9191             }
9192             ret = get_errno(futimesat(arg1, path(p), tvp));
9193             unlock_user(p, arg2, 0);
9194         }
9195         return ret;
9196 #endif
9197 #ifdef TARGET_NR_access
9198     case TARGET_NR_access:
9199         if (!(p = lock_user_string(arg1))) {
9200             return -TARGET_EFAULT;
9201         }
9202         ret = get_errno(access(path(p), arg2));
9203         unlock_user(p, arg1, 0);
9204         return ret;
9205 #endif
9206 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9207     case TARGET_NR_faccessat:
9208         if (!(p = lock_user_string(arg2))) {
9209             return -TARGET_EFAULT;
9210         }
9211         ret = get_errno(faccessat(arg1, p, arg3, 0));
9212         unlock_user(p, arg2, 0);
9213         return ret;
9214 #endif
9215 #if defined(TARGET_NR_faccessat2)
9216     case TARGET_NR_faccessat2:
9217         if (!(p = lock_user_string(arg2))) {
9218             return -TARGET_EFAULT;
9219         }
9220         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9221         unlock_user(p, arg2, 0);
9222         return ret;
9223 #endif
9224 #ifdef TARGET_NR_nice /* not on alpha */
9225     case TARGET_NR_nice:
9226         return get_errno(nice(arg1));
9227 #endif
9228     case TARGET_NR_sync:
9229         sync();
9230         return 0;
9231 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9232     case TARGET_NR_syncfs:
9233         return get_errno(syncfs(arg1));
9234 #endif
9235     case TARGET_NR_kill:
9236         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9237 #ifdef TARGET_NR_rename
9238     case TARGET_NR_rename:
9239         {
9240             void *p2;
9241             p = lock_user_string(arg1);
9242             p2 = lock_user_string(arg2);
9243             if (!p || !p2)
9244                 ret = -TARGET_EFAULT;
9245             else
9246                 ret = get_errno(rename(p, p2));
9247             unlock_user(p2, arg2, 0);
9248             unlock_user(p, arg1, 0);
9249         }
9250         return ret;
9251 #endif
9252 #if defined(TARGET_NR_renameat)
9253     case TARGET_NR_renameat:
9254         {
9255             void *p2;
9256             p  = lock_user_string(arg2);
9257             p2 = lock_user_string(arg4);
9258             if (!p || !p2)
9259                 ret = -TARGET_EFAULT;
9260             else
9261                 ret = get_errno(renameat(arg1, p, arg3, p2));
9262             unlock_user(p2, arg4, 0);
9263             unlock_user(p, arg2, 0);
9264         }
9265         return ret;
9266 #endif
9267 #if defined(TARGET_NR_renameat2)
9268     case TARGET_NR_renameat2:
9269         {
9270             void *p2;
9271             p  = lock_user_string(arg2);
9272             p2 = lock_user_string(arg4);
9273             if (!p || !p2) {
9274                 ret = -TARGET_EFAULT;
9275             } else {
9276                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9277             }
9278             unlock_user(p2, arg4, 0);
9279             unlock_user(p, arg2, 0);
9280         }
9281         return ret;
9282 #endif
9283 #ifdef TARGET_NR_mkdir
9284     case TARGET_NR_mkdir:
9285         if (!(p = lock_user_string(arg1)))
9286             return -TARGET_EFAULT;
9287         ret = get_errno(mkdir(p, arg2));
9288         unlock_user(p, arg1, 0);
9289         return ret;
9290 #endif
9291 #if defined(TARGET_NR_mkdirat)
9292     case TARGET_NR_mkdirat:
9293         if (!(p = lock_user_string(arg2)))
9294             return -TARGET_EFAULT;
9295         ret = get_errno(mkdirat(arg1, p, arg3));
9296         unlock_user(p, arg2, 0);
9297         return ret;
9298 #endif
9299 #ifdef TARGET_NR_rmdir
9300     case TARGET_NR_rmdir:
9301         if (!(p = lock_user_string(arg1)))
9302             return -TARGET_EFAULT;
9303         ret = get_errno(rmdir(p));
9304         unlock_user(p, arg1, 0);
9305         return ret;
9306 #endif
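    /*
     * For dup/dup2/dup3 below, fd_trans_dup() copies any fd translation
     * state attached to the old descriptor so the duplicate keeps the same
     * host<->target conversion behaviour.
     */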
9307     case TARGET_NR_dup:
9308         ret = get_errno(dup(arg1));
9309         if (ret >= 0) {
9310             fd_trans_dup(arg1, ret);
9311         }
9312         return ret;
9313 #ifdef TARGET_NR_pipe
9314     case TARGET_NR_pipe:
9315         return do_pipe(cpu_env, arg1, 0, 0);
9316 #endif
9317 #ifdef TARGET_NR_pipe2
9318     case TARGET_NR_pipe2:
9319         return do_pipe(cpu_env, arg1,
9320                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9321 #endif
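    /*
     * times() reports CPU times in clock ticks; both the tms fields and the
     * return value are converted to the guest's clock_t representation.
     */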
9322     case TARGET_NR_times:
9323         {
9324             struct target_tms *tmsp;
9325             struct tms tms;
9326             ret = get_errno(times(&tms));
9327             if (arg1) {
9328                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9329                 if (!tmsp)
9330                     return -TARGET_EFAULT;
9331                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9332                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9333                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9334                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9335             }
9336             if (!is_error(ret))
9337                 ret = host_to_target_clock_t(ret);
9338         }
9339         return ret;
9340     case TARGET_NR_acct:
9341         if (arg1 == 0) {
9342             ret = get_errno(acct(NULL));
9343         } else {
9344             if (!(p = lock_user_string(arg1))) {
9345                 return -TARGET_EFAULT;
9346             }
9347             ret = get_errno(acct(path(p)));
9348             unlock_user(p, arg1, 0);
9349         }
9350         return ret;
9351 #ifdef TARGET_NR_umount2
9352     case TARGET_NR_umount2:
9353         if (!(p = lock_user_string(arg1)))
9354             return -TARGET_EFAULT;
9355         ret = get_errno(umount2(p, arg2));
9356         unlock_user(p, arg1, 0);
9357         return ret;
9358 #endif
9359     case TARGET_NR_ioctl:
9360         return do_ioctl(arg1, arg2, arg3);
9361 #ifdef TARGET_NR_fcntl
9362     case TARGET_NR_fcntl:
9363         return do_fcntl(arg1, arg2, arg3);
9364 #endif
9365     case TARGET_NR_setpgid:
9366         return get_errno(setpgid(arg1, arg2));
9367     case TARGET_NR_umask:
9368         return get_errno(umask(arg1));
9369     case TARGET_NR_chroot:
9370         if (!(p = lock_user_string(arg1)))
9371             return -TARGET_EFAULT;
9372         ret = get_errno(chroot(p));
9373         unlock_user(p, arg1, 0);
9374         return ret;
9375 #ifdef TARGET_NR_dup2
9376     case TARGET_NR_dup2:
9377         ret = get_errno(dup2(arg1, arg2));
9378         if (ret >= 0) {
9379             fd_trans_dup(arg1, arg2);
9380         }
9381         return ret;
9382 #endif
9383 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9384     case TARGET_NR_dup3:
9385     {
9386         int host_flags;
9387 
9388         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9389             return -TARGET_EINVAL;
9390         }
9391         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9392         ret = get_errno(dup3(arg1, arg2, host_flags));
9393         if (ret >= 0) {
9394             fd_trans_dup(arg1, arg2);
9395         }
9396         return ret;
9397     }
9398 #endif
9399 #ifdef TARGET_NR_getppid /* not on alpha */
9400     case TARGET_NR_getppid:
9401         return get_errno(getppid());
9402 #endif
9403 #ifdef TARGET_NR_getpgrp
9404     case TARGET_NR_getpgrp:
9405         return get_errno(getpgrp());
9406 #endif
9407     case TARGET_NR_setsid:
9408         return get_errno(setsid());
9409 #ifdef TARGET_NR_sigaction
9410     case TARGET_NR_sigaction:
9411         {
9412 #if defined(TARGET_MIPS)
9413             struct target_sigaction act, oact, *pact, *old_act;
9414 
9415             if (arg2) {
9416                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9417                     return -TARGET_EFAULT;
9418                 act._sa_handler = old_act->_sa_handler;
9419                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9420                 act.sa_flags = old_act->sa_flags;
9421                 unlock_user_struct(old_act, arg2, 0);
9422                 pact = &act;
9423             } else {
9424                 pact = NULL;
9425             }
9426 
9427             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9428 
9429             if (!is_error(ret) && arg3) {
9430                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9431                     return -TARGET_EFAULT;
9432                 old_act->_sa_handler = oact._sa_handler;
9433                 old_act->sa_flags = oact.sa_flags;
9434                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9435                 old_act->sa_mask.sig[1] = 0;
9436                 old_act->sa_mask.sig[2] = 0;
9437                 old_act->sa_mask.sig[3] = 0;
9438                 unlock_user_struct(old_act, arg3, 1);
9439             }
9440 #else
9441             struct target_old_sigaction *old_act;
9442             struct target_sigaction act, oact, *pact;
9443             if (arg2) {
9444                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9445                     return -TARGET_EFAULT;
9446                 act._sa_handler = old_act->_sa_handler;
9447                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9448                 act.sa_flags = old_act->sa_flags;
9449 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9450                 act.sa_restorer = old_act->sa_restorer;
9451 #endif
9452                 unlock_user_struct(old_act, arg2, 0);
9453                 pact = &act;
9454             } else {
9455                 pact = NULL;
9456             }
9457             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9458             if (!is_error(ret) && arg3) {
9459                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9460                     return -TARGET_EFAULT;
9461                 old_act->_sa_handler = oact._sa_handler;
9462                 old_act->sa_mask = oact.sa_mask.sig[0];
9463                 old_act->sa_flags = oact.sa_flags;
9464 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9465                 old_act->sa_restorer = oact.sa_restorer;
9466 #endif
9467                 unlock_user_struct(old_act, arg3, 1);
9468             }
9469 #endif
9470         }
9471         return ret;
9472 #endif
9473     case TARGET_NR_rt_sigaction:
9474         {
9475             /*
9476              * For Alpha and SPARC this is a 5 argument syscall, with
9477              * a 'restorer' parameter which must be copied into the
9478              * sa_restorer field of the sigaction struct.
9479              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9480              * and arg5 is the sigsetsize.
9481              */
9482 #if defined(TARGET_ALPHA)
9483             target_ulong sigsetsize = arg4;
9484             target_ulong restorer = arg5;
9485 #elif defined(TARGET_SPARC)
9486             target_ulong restorer = arg4;
9487             target_ulong sigsetsize = arg5;
9488 #else
9489             target_ulong sigsetsize = arg4;
9490             target_ulong restorer = 0;
9491 #endif
9492             struct target_sigaction *act = NULL;
9493             struct target_sigaction *oact = NULL;
9494 
9495             if (sigsetsize != sizeof(target_sigset_t)) {
9496                 return -TARGET_EINVAL;
9497             }
9498             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9499                 return -TARGET_EFAULT;
9500             }
9501             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9502                 ret = -TARGET_EFAULT;
9503             } else {
9504                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9505                 if (oact) {
9506                     unlock_user_struct(oact, arg3, 1);
9507                 }
9508             }
9509             if (act) {
9510                 unlock_user_struct(act, arg2, 0);
9511             }
9512         }
9513         return ret;
9514 #ifdef TARGET_NR_sgetmask /* not on alpha */
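    /* sgetmask/ssetmask operate on the old single-word signal mask format. */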
9515     case TARGET_NR_sgetmask:
9516         {
9517             sigset_t cur_set;
9518             abi_ulong target_set;
9519             ret = do_sigprocmask(0, NULL, &cur_set);
9520             if (!ret) {
9521                 host_to_target_old_sigset(&target_set, &cur_set);
9522                 ret = target_set;
9523             }
9524         }
9525         return ret;
9526 #endif
9527 #ifdef TARGET_NR_ssetmask /* not on alpha */
9528     case TARGET_NR_ssetmask:
9529         {
9530             sigset_t set, oset;
9531             abi_ulong target_set = arg1;
9532             target_to_host_old_sigset(&set, &target_set);
9533             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9534             if (!ret) {
9535                 host_to_target_old_sigset(&target_set, &oset);
9536                 ret = target_set;
9537             }
9538         }
9539         return ret;
9540 #endif
9541 #ifdef TARGET_NR_sigprocmask
9542     case TARGET_NR_sigprocmask:
9543         {
9544 #if defined(TARGET_ALPHA)
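            /*
             * Alpha passes the new mask by value in arg2 and returns the
             * previous mask as the syscall result rather than through a
             * user pointer, so it needs its own handling here.
             */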
9545             sigset_t set, oldset;
9546             abi_ulong mask;
9547             int how;
9548 
9549             switch (arg1) {
9550             case TARGET_SIG_BLOCK:
9551                 how = SIG_BLOCK;
9552                 break;
9553             case TARGET_SIG_UNBLOCK:
9554                 how = SIG_UNBLOCK;
9555                 break;
9556             case TARGET_SIG_SETMASK:
9557                 how = SIG_SETMASK;
9558                 break;
9559             default:
9560                 return -TARGET_EINVAL;
9561             }
9562             mask = arg2;
9563             target_to_host_old_sigset(&set, &mask);
9564 
9565             ret = do_sigprocmask(how, &set, &oldset);
9566             if (!is_error(ret)) {
9567                 host_to_target_old_sigset(&mask, &oldset);
9568                 ret = mask;
9569                 cpu_env->ir[IR_V0] = 0; /* force no error */
9570             }
9571 #else
9572             sigset_t set, oldset, *set_ptr;
9573             int how;
9574 
9575             if (arg2) {
9576                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9577                 if (!p) {
9578                     return -TARGET_EFAULT;
9579                 }
9580                 target_to_host_old_sigset(&set, p);
9581                 unlock_user(p, arg2, 0);
9582                 set_ptr = &set;
9583                 switch (arg1) {
9584                 case TARGET_SIG_BLOCK:
9585                     how = SIG_BLOCK;
9586                     break;
9587                 case TARGET_SIG_UNBLOCK:
9588                     how = SIG_UNBLOCK;
9589                     break;
9590                 case TARGET_SIG_SETMASK:
9591                     how = SIG_SETMASK;
9592                     break;
9593                 default:
9594                     return -TARGET_EINVAL;
9595                 }
9596             } else {
9597                 how = 0;
9598                 set_ptr = NULL;
9599             }
9600             ret = do_sigprocmask(how, set_ptr, &oldset);
9601             if (!is_error(ret) && arg3) {
9602                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9603                     return -TARGET_EFAULT;
9604                 host_to_target_old_sigset(p, &oldset);
9605                 unlock_user(p, arg3, sizeof(target_sigset_t));
9606             }
9607 #endif
9608         }
9609         return ret;
9610 #endif
9611     case TARGET_NR_rt_sigprocmask:
9612         {
9613             int how = arg1;
9614             sigset_t set, oldset, *set_ptr;
9615 
9616             if (arg4 != sizeof(target_sigset_t)) {
9617                 return -TARGET_EINVAL;
9618             }
9619 
9620             if (arg2) {
9621                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9622                 if (!p) {
9623                     return -TARGET_EFAULT;
9624                 }
9625                 target_to_host_sigset(&set, p);
9626                 unlock_user(p, arg2, 0);
9627                 set_ptr = &set;
9628                 switch(how) {
9629                 case TARGET_SIG_BLOCK:
9630                     how = SIG_BLOCK;
9631                     break;
9632                 case TARGET_SIG_UNBLOCK:
9633                     how = SIG_UNBLOCK;
9634                     break;
9635                 case TARGET_SIG_SETMASK:
9636                     how = SIG_SETMASK;
9637                     break;
9638                 default:
9639                     return -TARGET_EINVAL;
9640                 }
9641             } else {
9642                 how = 0;
9643                 set_ptr = NULL;
9644             }
9645             ret = do_sigprocmask(how, set_ptr, &oldset);
9646             if (!is_error(ret) && arg3) {
9647                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9648                     return -TARGET_EFAULT;
9649                 host_to_target_sigset(p, &oldset);
9650                 unlock_user(p, arg3, sizeof(target_sigset_t));
9651             }
9652         }
9653         return ret;
9654 #ifdef TARGET_NR_sigpending
9655     case TARGET_NR_sigpending:
9656         {
9657             sigset_t set;
9658             ret = get_errno(sigpending(&set));
9659             if (!is_error(ret)) {
9660                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9661                     return -TARGET_EFAULT;
9662                 host_to_target_old_sigset(p, &set);
9663                 unlock_user(p, arg1, sizeof(target_sigset_t));
9664             }
9665         }
9666         return ret;
9667 #endif
9668     case TARGET_NR_rt_sigpending:
9669         {
9670             sigset_t set;
9671 
9672             /* Yes, this check is >, not != like most. We follow the kernel's
9673              * logic and it does it like this because it implements
9674              * NR_sigpending through the same code path, and in that case
9675              * the old_sigset_t is smaller in size.
9676              */
9677             if (arg2 > sizeof(target_sigset_t)) {
9678                 return -TARGET_EINVAL;
9679             }
9680 
9681             ret = get_errno(sigpending(&set));
9682             if (!is_error(ret)) {
9683                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9684                     return -TARGET_EFAULT;
9685                 host_to_target_sigset(p, &set);
9686                 unlock_user(p, arg1, sizeof(target_sigset_t));
9687             }
9688         }
9689         return ret;
9690 #ifdef TARGET_NR_sigsuspend
9691     case TARGET_NR_sigsuspend:
9692         {
9693             sigset_t *set;
9694 
9695 #if defined(TARGET_ALPHA)
9696             TaskState *ts = cpu->opaque;
9697             /* target_to_host_old_sigset will bswap back */
9698             abi_ulong mask = tswapal(arg1);
9699             set = &ts->sigsuspend_mask;
9700             target_to_host_old_sigset(set, &mask);
9701 #else
9702             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9703             if (ret != 0) {
9704                 return ret;
9705             }
9706 #endif
9707             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9708             finish_sigsuspend_mask(ret);
9709         }
9710         return ret;
9711 #endif
9712     case TARGET_NR_rt_sigsuspend:
9713         {
9714             sigset_t *set;
9715 
9716             ret = process_sigsuspend_mask(&set, arg1, arg2);
9717             if (ret != 0) {
9718                 return ret;
9719             }
9720             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9721             finish_sigsuspend_mask(ret);
9722         }
9723         return ret;
9724 #ifdef TARGET_NR_rt_sigtimedwait
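    /*
     * On success rt_sigtimedwait returns a host signal number, which is
     * translated back to the guest's numbering before returning.
     */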
9725     case TARGET_NR_rt_sigtimedwait:
9726         {
9727             sigset_t set;
9728             struct timespec uts, *puts;
9729             siginfo_t uinfo;
9730 
9731             if (arg4 != sizeof(target_sigset_t)) {
9732                 return -TARGET_EINVAL;
9733             }
9734 
9735             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9736                 return -TARGET_EFAULT;
9737             target_to_host_sigset(&set, p);
9738             unlock_user(p, arg1, 0);
9739             if (arg3) {
9740                 puts = &uts;
9741                 if (target_to_host_timespec(puts, arg3)) {
9742                     return -TARGET_EFAULT;
9743                 }
9744             } else {
9745                 puts = NULL;
9746             }
9747             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9748                                                  SIGSET_T_SIZE));
9749             if (!is_error(ret)) {
9750                 if (arg2) {
9751                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9752                                   0);
9753                     if (!p) {
9754                         return -TARGET_EFAULT;
9755                     }
9756                     host_to_target_siginfo(p, &uinfo);
9757                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9758                 }
9759                 ret = host_to_target_signal(ret);
9760             }
9761         }
9762         return ret;
9763 #endif
9764 #ifdef TARGET_NR_rt_sigtimedwait_time64
9765     case TARGET_NR_rt_sigtimedwait_time64:
9766         {
9767             sigset_t set;
9768             struct timespec uts, *puts;
9769             siginfo_t uinfo;
9770 
9771             if (arg4 != sizeof(target_sigset_t)) {
9772                 return -TARGET_EINVAL;
9773             }
9774 
9775             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9776             if (!p) {
9777                 return -TARGET_EFAULT;
9778             }
9779             target_to_host_sigset(&set, p);
9780             unlock_user(p, arg1, 0);
9781             if (arg3) {
9782                 puts = &uts;
9783                 if (target_to_host_timespec64(puts, arg3)) {
9784                     return -TARGET_EFAULT;
9785                 }
9786             } else {
9787                 puts = NULL;
9788             }
9789             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9790                                                  SIGSET_T_SIZE));
9791             if (!is_error(ret)) {
9792                 if (arg2) {
9793                     p = lock_user(VERIFY_WRITE, arg2,
9794                                   sizeof(target_siginfo_t), 0);
9795                     if (!p) {
9796                         return -TARGET_EFAULT;
9797                     }
9798                     host_to_target_siginfo(p, &uinfo);
9799                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9800                 }
9801                 ret = host_to_target_signal(ret);
9802             }
9803         }
9804         return ret;
9805 #endif
9806     case TARGET_NR_rt_sigqueueinfo:
9807         {
9808             siginfo_t uinfo;
9809 
9810             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9811             if (!p) {
9812                 return -TARGET_EFAULT;
9813             }
9814             target_to_host_siginfo(&uinfo, p);
9815             unlock_user(p, arg3, 0);
9816             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9817         }
9818         return ret;
9819     case TARGET_NR_rt_tgsigqueueinfo:
9820         {
9821             siginfo_t uinfo;
9822 
9823             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9824             if (!p) {
9825                 return -TARGET_EFAULT;
9826             }
9827             target_to_host_siginfo(&uinfo, p);
9828             unlock_user(p, arg4, 0);
9829             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9830         }
9831         return ret;
9832 #ifdef TARGET_NR_sigreturn
9833     case TARGET_NR_sigreturn:
9834         if (block_signals()) {
9835             return -QEMU_ERESTARTSYS;
9836         }
9837         return do_sigreturn(cpu_env);
9838 #endif
9839     case TARGET_NR_rt_sigreturn:
9840         if (block_signals()) {
9841             return -QEMU_ERESTARTSYS;
9842         }
9843         return do_rt_sigreturn(cpu_env);
9844     case TARGET_NR_sethostname:
9845         if (!(p = lock_user_string(arg1)))
9846             return -TARGET_EFAULT;
9847         ret = get_errno(sethostname(p, arg2));
9848         unlock_user(p, arg1, 0);
9849         return ret;
9850 #ifdef TARGET_NR_setrlimit
9851     case TARGET_NR_setrlimit:
9852         {
9853             int resource = target_to_host_resource(arg1);
9854             struct target_rlimit *target_rlim;
9855             struct rlimit rlim;
9856             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9857                 return -TARGET_EFAULT;
9858             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9859             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9860             unlock_user_struct(target_rlim, arg2, 0);
9861             /*
9862              * If we just passed through resource limit settings for memory then
9863              * they would also apply to QEMU's own allocations, and QEMU will
9864              * crash or hang or die if its allocations fail. Ideally we would
9865              * track the guest allocations in QEMU and apply the limits ourselves.
9866              * For now, just tell the guest the call succeeded but don't actually
9867              * limit anything.
9868              */
9869             if (resource != RLIMIT_AS &&
9870                 resource != RLIMIT_DATA &&
9871                 resource != RLIMIT_STACK) {
9872                 return get_errno(setrlimit(resource, &rlim));
9873             } else {
9874                 return 0;
9875             }
9876         }
9877 #endif
9878 #ifdef TARGET_NR_getrlimit
9879     case TARGET_NR_getrlimit:
9880         {
9881             int resource = target_to_host_resource(arg1);
9882             struct target_rlimit *target_rlim;
9883             struct rlimit rlim;
9884 
9885             ret = get_errno(getrlimit(resource, &rlim));
9886             if (!is_error(ret)) {
9887                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9888                     return -TARGET_EFAULT;
9889                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9890                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9891                 unlock_user_struct(target_rlim, arg2, 1);
9892             }
9893         }
9894         return ret;
9895 #endif
9896     case TARGET_NR_getrusage:
9897         {
9898             struct rusage rusage;
9899             ret = get_errno(getrusage(arg1, &rusage));
9900             if (!is_error(ret)) {
9901                 ret = host_to_target_rusage(arg2, &rusage);
9902             }
9903         }
9904         return ret;
9905 #if defined(TARGET_NR_gettimeofday)
9906     case TARGET_NR_gettimeofday:
9907         {
9908             struct timeval tv;
9909             struct timezone tz;
9910 
9911             ret = get_errno(gettimeofday(&tv, &tz));
9912             if (!is_error(ret)) {
9913                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9914                     return -TARGET_EFAULT;
9915                 }
9916                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9917                     return -TARGET_EFAULT;
9918                 }
9919             }
9920         }
9921         return ret;
9922 #endif
9923 #if defined(TARGET_NR_settimeofday)
9924     case TARGET_NR_settimeofday:
9925         {
9926             struct timeval tv, *ptv = NULL;
9927             struct timezone tz, *ptz = NULL;
9928 
9929             if (arg1) {
9930                 if (copy_from_user_timeval(&tv, arg1)) {
9931                     return -TARGET_EFAULT;
9932                 }
9933                 ptv = &tv;
9934             }
9935 
9936             if (arg2) {
9937                 if (copy_from_user_timezone(&tz, arg2)) {
9938                     return -TARGET_EFAULT;
9939                 }
9940                 ptz = &tz;
9941             }
9942 
9943             return get_errno(settimeofday(ptv, ptz));
9944         }
9945 #endif
9946 #if defined(TARGET_NR_select)
9947     case TARGET_NR_select:
9948 #if defined(TARGET_WANT_NI_OLD_SELECT)
9949         /* Some architectures used to implement old_select here,
9950          * but now return ENOSYS for it.
9951          */
9952         ret = -TARGET_ENOSYS;
9953 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9954         ret = do_old_select(arg1);
9955 #else
9956         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9957 #endif
9958         return ret;
9959 #endif
9960 #ifdef TARGET_NR_pselect6
9961     case TARGET_NR_pselect6:
9962         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9963 #endif
9964 #ifdef TARGET_NR_pselect6_time64
9965     case TARGET_NR_pselect6_time64:
9966         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9967 #endif
9968 #ifdef TARGET_NR_symlink
9969     case TARGET_NR_symlink:
9970         {
9971             void *p2;
9972             p = lock_user_string(arg1);
9973             p2 = lock_user_string(arg2);
9974             if (!p || !p2)
9975                 ret = -TARGET_EFAULT;
9976             else
9977                 ret = get_errno(symlink(p, p2));
9978             unlock_user(p2, arg2, 0);
9979             unlock_user(p, arg1, 0);
9980         }
9981         return ret;
9982 #endif
9983 #if defined(TARGET_NR_symlinkat)
9984     case TARGET_NR_symlinkat:
9985         {
9986             void *p2;
9987             p  = lock_user_string(arg1);
9988             p2 = lock_user_string(arg3);
9989             if (!p || !p2)
9990                 ret = -TARGET_EFAULT;
9991             else
9992                 ret = get_errno(symlinkat(p, arg2, p2));
9993             unlock_user(p2, arg3, 0);
9994             unlock_user(p, arg1, 0);
9995         }
9996         return ret;
9997 #endif
9998 #ifdef TARGET_NR_readlink
9999     case TARGET_NR_readlink:
10000         {
10001             void *p2;
10002             p = lock_user_string(arg1);
10003             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10004             if (!p || !p2) {
10005                 ret = -TARGET_EFAULT;
10006             } else if (!arg3) {
10007                 /* Short circuit this for the magic exe check. */
10008                 ret = -TARGET_EINVAL;
10009             } else if (is_proc_myself((const char *)p, "exe")) {
10010                 /*
10011                  * Don't worry about sign mismatch as earlier mapping
10012                  * logic would have thrown a bad address error.
10013                  */
10014                 ret = MIN(strlen(exec_path), arg3);
10015                 /* We cannot NUL terminate the string. */
10016                 memcpy(p2, exec_path, ret);
10017             } else {
10018                 ret = get_errno(readlink(path(p), p2, arg3));
10019             }
10020             unlock_user(p2, arg2, ret);
10021             unlock_user(p, arg1, 0);
10022         }
10023         return ret;
10024 #endif
10025 #if defined(TARGET_NR_readlinkat)
10026     case TARGET_NR_readlinkat:
10027         {
10028             void *p2;
10029             p  = lock_user_string(arg2);
10030             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10031             if (!p || !p2) {
10032                 ret = -TARGET_EFAULT;
10033             } else if (!arg4) {
10034                 /* Short circuit this for the magic exe check. */
10035                 ret = -TARGET_EINVAL;
10036             } else if (is_proc_myself((const char *)p, "exe")) {
10037                 /*
10038                  * Don't worry about sign mismatch as earlier mapping
10039                  * logic would have thrown a bad address error.
10040                  */
10041                 ret = MIN(strlen(exec_path), arg4);
10042                 /* We cannot NUL terminate the string. */
10043                 memcpy(p2, exec_path, ret);
10044             } else {
10045                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10046             }
10047             unlock_user(p2, arg3, ret);
10048             unlock_user(p, arg2, 0);
10049         }
10050         return ret;
10051 #endif
10052 #ifdef TARGET_NR_swapon
10053     case TARGET_NR_swapon:
10054         if (!(p = lock_user_string(arg1)))
10055             return -TARGET_EFAULT;
10056         ret = get_errno(swapon(p, arg2));
10057         unlock_user(p, arg1, 0);
10058         return ret;
10059 #endif
10060     case TARGET_NR_reboot:
10061         /* arg4 must be ignored for all commands other than RESTART2 */
10062         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10063            p = lock_user_string(arg4);
10064            if (!p) {
10065                return -TARGET_EFAULT;
10066            }
10067            ret = get_errno(reboot(arg1, arg2, arg3, p));
10068            unlock_user(p, arg4, 0);
10069         } else {
10070            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10071         }
10072         return ret;
10073 #ifdef TARGET_NR_mmap
10074     case TARGET_NR_mmap:
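        /*
         * On the 32-bit targets listed below, the legacy mmap syscall takes
         * a single pointer to a block of six arguments in guest memory
         * instead of passing the arguments in registers.
         */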
10075 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10076     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10077     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10078     || defined(TARGET_S390X)
10079         {
10080             abi_ulong *v;
10081             abi_ulong v1, v2, v3, v4, v5, v6;
10082             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10083                 return -TARGET_EFAULT;
10084             v1 = tswapal(v[0]);
10085             v2 = tswapal(v[1]);
10086             v3 = tswapal(v[2]);
10087             v4 = tswapal(v[3]);
10088             v5 = tswapal(v[4]);
10089             v6 = tswapal(v[5]);
10090             unlock_user(v, arg1, 0);
10091             ret = get_errno(target_mmap(v1, v2, v3,
10092                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10093                                         v5, v6));
10094         }
10095 #else
10096         /* mmap pointers are always untagged */
10097         ret = get_errno(target_mmap(arg1, arg2, arg3,
10098                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10099                                     arg5,
10100                                     arg6));
10101 #endif
10102         return ret;
10103 #endif
10104 #ifdef TARGET_NR_mmap2
10105     case TARGET_NR_mmap2:
10106 #ifndef MMAP_SHIFT
10107 #define MMAP_SHIFT 12
10108 #endif
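        /* mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
           (4 KiB unless the target overrides MMAP_SHIFT). */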
10109         ret = target_mmap(arg1, arg2, arg3,
10110                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10111                           arg5, arg6 << MMAP_SHIFT);
10112         return get_errno(ret);
10113 #endif
10114     case TARGET_NR_munmap:
10115         arg1 = cpu_untagged_addr(cpu, arg1);
10116         return get_errno(target_munmap(arg1, arg2));
10117     case TARGET_NR_mprotect:
10118         arg1 = cpu_untagged_addr(cpu, arg1);
10119         {
10120             TaskState *ts = cpu->opaque;
10121             /* Special hack to detect libc making the stack executable.  */
10122             if ((arg3 & PROT_GROWSDOWN)
10123                 && arg1 >= ts->info->stack_limit
10124                 && arg1 <= ts->info->start_stack) {
10125                 arg3 &= ~PROT_GROWSDOWN;
10126                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10127                 arg1 = ts->info->stack_limit;
10128             }
10129         }
10130         return get_errno(target_mprotect(arg1, arg2, arg3));
10131 #ifdef TARGET_NR_mremap
10132     case TARGET_NR_mremap:
10133         arg1 = cpu_untagged_addr(cpu, arg1);
10134         /* mremap new_addr (arg5) is always untagged */
10135         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10136 #endif
10137         /* ??? msync/mlock/munlock are broken for softmmu.  */
10138 #ifdef TARGET_NR_msync
10139     case TARGET_NR_msync:
10140         return get_errno(msync(g2h(cpu, arg1), arg2,
10141                                target_to_host_msync_arg(arg3)));
10142 #endif
10143 #ifdef TARGET_NR_mlock
10144     case TARGET_NR_mlock:
10145         return get_errno(mlock(g2h(cpu, arg1), arg2));
10146 #endif
10147 #ifdef TARGET_NR_munlock
10148     case TARGET_NR_munlock:
10149         return get_errno(munlock(g2h(cpu, arg1), arg2));
10150 #endif
10151 #ifdef TARGET_NR_mlockall
10152     case TARGET_NR_mlockall:
10153         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10154 #endif
10155 #ifdef TARGET_NR_munlockall
10156     case TARGET_NR_munlockall:
10157         return get_errno(munlockall());
10158 #endif
10159 #ifdef TARGET_NR_truncate
10160     case TARGET_NR_truncate:
10161         if (!(p = lock_user_string(arg1)))
10162             return -TARGET_EFAULT;
10163         ret = get_errno(truncate(p, arg2));
10164         unlock_user(p, arg1, 0);
10165         return ret;
10166 #endif
10167 #ifdef TARGET_NR_ftruncate
10168     case TARGET_NR_ftruncate:
10169         return get_errno(ftruncate(arg1, arg2));
10170 #endif
10171     case TARGET_NR_fchmod:
10172         return get_errno(fchmod(arg1, arg2));
10173 #if defined(TARGET_NR_fchmodat)
10174     case TARGET_NR_fchmodat:
10175         if (!(p = lock_user_string(arg2)))
10176             return -TARGET_EFAULT;
10177         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10178         unlock_user(p, arg2, 0);
10179         return ret;
10180 #endif
10181     case TARGET_NR_getpriority:
10182         /* Note that negative values are valid for getpriority, so we must
10183            differentiate based on errno settings.  */
10184         errno = 0;
10185         ret = getpriority(arg1, arg2);
10186         if (ret == -1 && errno != 0) {
10187             return -host_to_target_errno(errno);
10188         }
10189 #ifdef TARGET_ALPHA
10190         /* Return value is the unbiased priority.  Signal no error.  */
10191         cpu_env->ir[IR_V0] = 0;
10192 #else
10193         /* Return value is a biased priority to avoid negative numbers.  */
10194         ret = 20 - ret;
10195 #endif
10196         return ret;
10197     case TARGET_NR_setpriority:
10198         return get_errno(setpriority(arg1, arg2, arg3));
10199 #ifdef TARGET_NR_statfs
10200     case TARGET_NR_statfs:
10201         if (!(p = lock_user_string(arg1))) {
10202             return -TARGET_EFAULT;
10203         }
10204         ret = get_errno(statfs(path(p), &stfs));
10205         unlock_user(p, arg1, 0);
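    /* fstatfs jumps here to share the host-to-target statfs conversion. */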
10206     convert_statfs:
10207         if (!is_error(ret)) {
10208             struct target_statfs *target_stfs;
10209 
10210             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10211                 return -TARGET_EFAULT;
10212             __put_user(stfs.f_type, &target_stfs->f_type);
10213             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10214             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10215             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10216             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10217             __put_user(stfs.f_files, &target_stfs->f_files);
10218             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10219             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10220             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10221             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10222             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10223 #ifdef _STATFS_F_FLAGS
10224             __put_user(stfs.f_flags, &target_stfs->f_flags);
10225 #else
10226             __put_user(0, &target_stfs->f_flags);
10227 #endif
10228             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10229             unlock_user_struct(target_stfs, arg2, 1);
10230         }
10231         return ret;
10232 #endif
10233 #ifdef TARGET_NR_fstatfs
10234     case TARGET_NR_fstatfs:
10235         ret = get_errno(fstatfs(arg1, &stfs));
10236         goto convert_statfs;
10237 #endif
10238 #ifdef TARGET_NR_statfs64
10239     case TARGET_NR_statfs64:
10240         if (!(p = lock_user_string(arg1))) {
10241             return -TARGET_EFAULT;
10242         }
10243         ret = get_errno(statfs(path(p), &stfs));
10244         unlock_user(p, arg1, 0);
10245     convert_statfs64:
10246         if (!is_error(ret)) {
10247             struct target_statfs64 *target_stfs;
10248 
10249             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10250                 return -TARGET_EFAULT;
10251             __put_user(stfs.f_type, &target_stfs->f_type);
10252             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10253             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10254             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10255             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10256             __put_user(stfs.f_files, &target_stfs->f_files);
10257             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10258             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10259             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10260             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10261             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10262 #ifdef _STATFS_F_FLAGS
10263             __put_user(stfs.f_flags, &target_stfs->f_flags);
10264 #else
10265             __put_user(0, &target_stfs->f_flags);
10266 #endif
10267             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10268             unlock_user_struct(target_stfs, arg3, 1);
10269         }
10270         return ret;
10271     case TARGET_NR_fstatfs64:
10272         ret = get_errno(fstatfs(arg1, &stfs));
10273         goto convert_statfs64;
10274 #endif
10275 #ifdef TARGET_NR_socketcall
10276     case TARGET_NR_socketcall:
10277         return do_socketcall(arg1, arg2);
10278 #endif
10279 #ifdef TARGET_NR_accept
10280     case TARGET_NR_accept:
10281         return do_accept4(arg1, arg2, arg3, 0);
10282 #endif
10283 #ifdef TARGET_NR_accept4
10284     case TARGET_NR_accept4:
10285         return do_accept4(arg1, arg2, arg3, arg4);
10286 #endif
10287 #ifdef TARGET_NR_bind
10288     case TARGET_NR_bind:
10289         return do_bind(arg1, arg2, arg3);
10290 #endif
10291 #ifdef TARGET_NR_connect
10292     case TARGET_NR_connect:
10293         return do_connect(arg1, arg2, arg3);
10294 #endif
10295 #ifdef TARGET_NR_getpeername
10296     case TARGET_NR_getpeername:
10297         return do_getpeername(arg1, arg2, arg3);
10298 #endif
10299 #ifdef TARGET_NR_getsockname
10300     case TARGET_NR_getsockname:
10301         return do_getsockname(arg1, arg2, arg3);
10302 #endif
10303 #ifdef TARGET_NR_getsockopt
10304     case TARGET_NR_getsockopt:
10305         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10306 #endif
10307 #ifdef TARGET_NR_listen
10308     case TARGET_NR_listen:
10309         return get_errno(listen(arg1, arg2));
10310 #endif
10311 #ifdef TARGET_NR_recv
10312     case TARGET_NR_recv:
10313         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10314 #endif
10315 #ifdef TARGET_NR_recvfrom
10316     case TARGET_NR_recvfrom:
10317         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10318 #endif
10319 #ifdef TARGET_NR_recvmsg
10320     case TARGET_NR_recvmsg:
10321         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10322 #endif
10323 #ifdef TARGET_NR_send
10324     case TARGET_NR_send:
10325         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10326 #endif
10327 #ifdef TARGET_NR_sendmsg
10328     case TARGET_NR_sendmsg:
10329         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10330 #endif
10331 #ifdef TARGET_NR_sendmmsg
10332     case TARGET_NR_sendmmsg:
10333         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10334 #endif
10335 #ifdef TARGET_NR_recvmmsg
10336     case TARGET_NR_recvmmsg:
10337         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10338 #endif
10339 #ifdef TARGET_NR_sendto
10340     case TARGET_NR_sendto:
10341         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10342 #endif
10343 #ifdef TARGET_NR_shutdown
10344     case TARGET_NR_shutdown:
10345         return get_errno(shutdown(arg1, arg2));
10346 #endif
10347 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10348     case TARGET_NR_getrandom:
10349         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10350         if (!p) {
10351             return -TARGET_EFAULT;
10352         }
10353         ret = get_errno(getrandom(p, arg2, arg3));
10354         unlock_user(p, arg1, ret);
10355         return ret;
10356 #endif
10357 #ifdef TARGET_NR_socket
10358     case TARGET_NR_socket:
10359         return do_socket(arg1, arg2, arg3);
10360 #endif
10361 #ifdef TARGET_NR_socketpair
10362     case TARGET_NR_socketpair:
10363         return do_socketpair(arg1, arg2, arg3, arg4);
10364 #endif
10365 #ifdef TARGET_NR_setsockopt
10366     case TARGET_NR_setsockopt:
10367         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10368 #endif
10369 #if defined(TARGET_NR_syslog)
10370     case TARGET_NR_syslog:
10371         {
10372             int len = arg2;
10373 
10374             switch (arg1) {
10375             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10376             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10377             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10378             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10379             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10380             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10381             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10382             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10383                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10384             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10385             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10386             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10387                 {
10388                     if (len < 0) {
10389                         return -TARGET_EINVAL;
10390                     }
10391                     if (len == 0) {
10392                         return 0;
10393                     }
10394                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10395                     if (!p) {
10396                         return -TARGET_EFAULT;
10397                     }
10398                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10399                     unlock_user(p, arg2, arg3);
10400                 }
10401                 return ret;
10402             default:
10403                 return -TARGET_EINVAL;
10404             }
10405         }
10406         break;
10407 #endif
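    /*
     * The guest's struct itimerval is handled as two consecutive
     * target_timeval structures (it_interval followed by it_value).
     */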
10408     case TARGET_NR_setitimer:
10409         {
10410             struct itimerval value, ovalue, *pvalue;
10411 
10412             if (arg2) {
10413                 pvalue = &value;
10414                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10415                     || copy_from_user_timeval(&pvalue->it_value,
10416                                               arg2 + sizeof(struct target_timeval)))
10417                     return -TARGET_EFAULT;
10418             } else {
10419                 pvalue = NULL;
10420             }
10421             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10422             if (!is_error(ret) && arg3) {
10423                 if (copy_to_user_timeval(arg3,
10424                                          &ovalue.it_interval)
10425                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10426                                             &ovalue.it_value))
10427                     return -TARGET_EFAULT;
10428             }
10429         }
10430         return ret;
10431     case TARGET_NR_getitimer:
10432         {
10433             struct itimerval value;
10434 
10435             ret = get_errno(getitimer(arg1, &value));
10436             if (!is_error(ret) && arg2) {
10437                 if (copy_to_user_timeval(arg2,
10438                                          &value.it_interval)
10439                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10440                                             &value.it_value))
10441                     return -TARGET_EFAULT;
10442             }
10443         }
10444         return ret;
10445 #ifdef TARGET_NR_stat
10446     case TARGET_NR_stat:
10447         if (!(p = lock_user_string(arg1))) {
10448             return -TARGET_EFAULT;
10449         }
10450         ret = get_errno(stat(path(p), &st));
10451         unlock_user(p, arg1, 0);
10452         goto do_stat;
10453 #endif
10454 #ifdef TARGET_NR_lstat
10455     case TARGET_NR_lstat:
10456         if (!(p = lock_user_string(arg1))) {
10457             return -TARGET_EFAULT;
10458         }
10459         ret = get_errno(lstat(path(p), &st));
10460         unlock_user(p, arg1, 0);
10461         goto do_stat;
10462 #endif
10463 #ifdef TARGET_NR_fstat
10464     case TARGET_NR_fstat:
10465         {
10466             ret = get_errno(fstat(arg1, &st));
10467 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
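        /* stat and lstat jump here to share the target_stat conversion. */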
10468         do_stat:
10469 #endif
10470             if (!is_error(ret)) {
10471                 struct target_stat *target_st;
10472 
10473                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10474                     return -TARGET_EFAULT;
10475                 memset(target_st, 0, sizeof(*target_st));
10476                 __put_user(st.st_dev, &target_st->st_dev);
10477                 __put_user(st.st_ino, &target_st->st_ino);
10478                 __put_user(st.st_mode, &target_st->st_mode);
10479                 __put_user(st.st_uid, &target_st->st_uid);
10480                 __put_user(st.st_gid, &target_st->st_gid);
10481                 __put_user(st.st_nlink, &target_st->st_nlink);
10482                 __put_user(st.st_rdev, &target_st->st_rdev);
10483                 __put_user(st.st_size, &target_st->st_size);
10484                 __put_user(st.st_blksize, &target_st->st_blksize);
10485                 __put_user(st.st_blocks, &target_st->st_blocks);
10486                 __put_user(st.st_atime, &target_st->target_st_atime);
10487                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10488                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10489 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10490                 __put_user(st.st_atim.tv_nsec,
10491                            &target_st->target_st_atime_nsec);
10492                 __put_user(st.st_mtim.tv_nsec,
10493                            &target_st->target_st_mtime_nsec);
10494                 __put_user(st.st_ctim.tv_nsec,
10495                            &target_st->target_st_ctime_nsec);
10496 #endif
10497                 unlock_user_struct(target_st, arg2, 1);
10498             }
10499         }
10500         return ret;
10501 #endif
10502     case TARGET_NR_vhangup:
10503         return get_errno(vhangup());
10504 #ifdef TARGET_NR_syscall
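    /* Indirect syscall: re-dispatch with the low 16 bits of arg1 as the
       syscall number and arg2..arg8 shifted down as its arguments. */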
10505     case TARGET_NR_syscall:
10506         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10507                           arg6, arg7, arg8, 0);
10508 #endif
10509 #if defined(TARGET_NR_wait4)
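    /*
     * For wait4, the status word is converted and copied back to the guest
     * only when a child was actually reaped (ret != 0).
     */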
10510     case TARGET_NR_wait4:
10511         {
10512             int status;
10513             abi_long status_ptr = arg2;
10514             struct rusage rusage, *rusage_ptr;
10515             abi_ulong target_rusage = arg4;
10516             abi_long rusage_err;
10517             if (target_rusage)
10518                 rusage_ptr = &rusage;
10519             else
10520                 rusage_ptr = NULL;
10521             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10522             if (!is_error(ret)) {
10523                 if (status_ptr && ret) {
10524                     status = host_to_target_waitstatus(status);
10525                     if (put_user_s32(status, status_ptr))
10526                         return -TARGET_EFAULT;
10527                 }
10528                 if (target_rusage) {
10529                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10530                     if (rusage_err) {
10531                         ret = rusage_err;
10532                     }
10533                 }
10534             }
10535         }
10536         return ret;
10537 #endif
10538 #ifdef TARGET_NR_swapoff
10539     case TARGET_NR_swapoff:
10540         if (!(p = lock_user_string(arg1)))
10541             return -TARGET_EFAULT;
10542         ret = get_errno(swapoff(p));
10543         unlock_user(p, arg1, 0);
10544         return ret;
10545 #endif
10546     case TARGET_NR_sysinfo:
10547         {
10548             struct target_sysinfo *target_value;
10549             struct sysinfo value;
10550             ret = get_errno(sysinfo(&value));
10551             if (!is_error(ret) && arg1)
10552             {
10553                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10554                     return -TARGET_EFAULT;
10555                 __put_user(value.uptime, &target_value->uptime);
10556                 __put_user(value.loads[0], &target_value->loads[0]);
10557                 __put_user(value.loads[1], &target_value->loads[1]);
10558                 __put_user(value.loads[2], &target_value->loads[2]);
10559                 __put_user(value.totalram, &target_value->totalram);
10560                 __put_user(value.freeram, &target_value->freeram);
10561                 __put_user(value.sharedram, &target_value->sharedram);
10562                 __put_user(value.bufferram, &target_value->bufferram);
10563                 __put_user(value.totalswap, &target_value->totalswap);
10564                 __put_user(value.freeswap, &target_value->freeswap);
10565                 __put_user(value.procs, &target_value->procs);
10566                 __put_user(value.totalhigh, &target_value->totalhigh);
10567                 __put_user(value.freehigh, &target_value->freehigh);
10568                 __put_user(value.mem_unit, &target_value->mem_unit);
10569                 unlock_user_struct(target_value, arg1, 1);
10570             }
10571         }
10572         return ret;
10573 #ifdef TARGET_NR_ipc
10574     case TARGET_NR_ipc:
10575         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10576 #endif
10577 #ifdef TARGET_NR_semget
10578     case TARGET_NR_semget:
10579         return get_errno(semget(arg1, arg2, arg3));
10580 #endif
10581 #ifdef TARGET_NR_semop
10582     case TARGET_NR_semop:
10583         return do_semtimedop(arg1, arg2, arg3, 0, false);
10584 #endif
10585 #ifdef TARGET_NR_semtimedop
10586     case TARGET_NR_semtimedop:
10587         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10588 #endif
10589 #ifdef TARGET_NR_semtimedop_time64
10590     case TARGET_NR_semtimedop_time64:
10591         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10592 #endif
10593 #ifdef TARGET_NR_semctl
10594     case TARGET_NR_semctl:
10595         return do_semctl(arg1, arg2, arg3, arg4);
10596 #endif
10597 #ifdef TARGET_NR_msgctl
10598     case TARGET_NR_msgctl:
10599         return do_msgctl(arg1, arg2, arg3);
10600 #endif
10601 #ifdef TARGET_NR_msgget
10602     case TARGET_NR_msgget:
10603         return get_errno(msgget(arg1, arg2));
10604 #endif
10605 #ifdef TARGET_NR_msgrcv
10606     case TARGET_NR_msgrcv:
10607         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10608 #endif
10609 #ifdef TARGET_NR_msgsnd
10610     case TARGET_NR_msgsnd:
10611         return do_msgsnd(arg1, arg2, arg3, arg4);
10612 #endif
10613 #ifdef TARGET_NR_shmget
10614     case TARGET_NR_shmget:
10615         return get_errno(shmget(arg1, arg2, arg3));
10616 #endif
10617 #ifdef TARGET_NR_shmctl
10618     case TARGET_NR_shmctl:
10619         return do_shmctl(arg1, arg2, arg3);
10620 #endif
10621 #ifdef TARGET_NR_shmat
10622     case TARGET_NR_shmat:
10623         return do_shmat(cpu_env, arg1, arg2, arg3);
10624 #endif
10625 #ifdef TARGET_NR_shmdt
10626     case TARGET_NR_shmdt:
10627         return do_shmdt(arg1);
10628 #endif
10629     case TARGET_NR_fsync:
10630         return get_errno(fsync(arg1));
10631     case TARGET_NR_clone:
10632         /* Linux manages to have three different orderings for its
10633          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10634          * match the kernel's CONFIG_CLONE_* settings.
10635          * Microblaze is further special in that it uses a sixth
10636          * implicit argument to clone for the TLS pointer.
10637          */
10638 #if defined(TARGET_MICROBLAZE)
10639         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10640 #elif defined(TARGET_CLONE_BACKWARDS)
10641         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10642 #elif defined(TARGET_CLONE_BACKWARDS2)
10643         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10644 #else
10645         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10646 #endif
10647         return ret;
10648 #ifdef __NR_exit_group
10649         /* new thread calls */
10650     case TARGET_NR_exit_group:
10651         preexit_cleanup(cpu_env, arg1);
10652         return get_errno(exit_group(arg1));
10653 #endif
10654     case TARGET_NR_setdomainname:
10655         if (!(p = lock_user_string(arg1)))
10656             return -TARGET_EFAULT;
10657         ret = get_errno(setdomainname(p, arg2));
10658         unlock_user(p, arg1, 0);
10659         return ret;
10660     case TARGET_NR_uname:
10661         /* no need to transcode because we use the linux syscall */
10662         {
10663             struct new_utsname * buf;
10664 
10665             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10666                 return -TARGET_EFAULT;
10667             ret = get_errno(sys_uname(buf));
10668             if (!is_error(ret)) {
10669                 /* Overwrite the native machine name with whatever is being
10670                    emulated. */
10671                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10672                           sizeof(buf->machine));
10673                 /* Allow the user to override the reported release.  */
10674                 if (qemu_uname_release && *qemu_uname_release) {
10675                     g_strlcpy(buf->release, qemu_uname_release,
10676                               sizeof(buf->release));
10677                 }
10678             }
10679             unlock_user_struct(buf, arg1, 1);
10680         }
10681         return ret;
10682 #ifdef TARGET_I386
10683     case TARGET_NR_modify_ldt:
10684         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10685 #if !defined(TARGET_X86_64)
10686     case TARGET_NR_vm86:
10687         return do_vm86(cpu_env, arg1, arg2);
10688 #endif
10689 #endif
10690 #if defined(TARGET_NR_adjtimex)
10691     case TARGET_NR_adjtimex:
10692         {
10693             struct timex host_buf;
10694 
10695             if (target_to_host_timex(&host_buf, arg1) != 0) {
10696                 return -TARGET_EFAULT;
10697             }
10698             ret = get_errno(adjtimex(&host_buf));
10699             if (!is_error(ret)) {
10700                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10701                     return -TARGET_EFAULT;
10702                 }
10703             }
10704         }
10705         return ret;
10706 #endif
10707 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10708     case TARGET_NR_clock_adjtime:
10709         {
10710             struct timex htx, *phtx = &htx;
10711 
10712             if (target_to_host_timex(phtx, arg2) != 0) {
10713                 return -TARGET_EFAULT;
10714             }
10715             ret = get_errno(clock_adjtime(arg1, phtx));
10716             if (!is_error(ret) && phtx) {
10717                 if (host_to_target_timex(arg2, phtx) != 0) {
10718                     return -TARGET_EFAULT;
10719                 }
10720             }
10721         }
10722         return ret;
10723 #endif
10724 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10725     case TARGET_NR_clock_adjtime64:
10726         {
10727             struct timex htx;
10728 
10729             if (target_to_host_timex64(&htx, arg2) != 0) {
10730                 return -TARGET_EFAULT;
10731             }
10732             ret = get_errno(clock_adjtime(arg1, &htx));
10733             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10734                     return -TARGET_EFAULT;
10735             }
10736         }
10737         return ret;
10738 #endif
10739     case TARGET_NR_getpgid:
10740         return get_errno(getpgid(arg1));
10741     case TARGET_NR_fchdir:
10742         return get_errno(fchdir(arg1));
10743     case TARGET_NR_personality:
10744         return get_errno(personality(arg1));
10745 #ifdef TARGET_NR__llseek /* Not on alpha */
10746     case TARGET_NR__llseek:
10747         {
10748             int64_t res;
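            /* Hosts without a separate llseek syscall take the plain lseek()
             * path below, reassembling the 64-bit offset from the two 32-bit
             * halves the target passed in arg2 (high) and arg3 (low).
             */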
10749 #if !defined(__NR_llseek)
10750             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10751             if (res == -1) {
10752                 ret = get_errno(res);
10753             } else {
10754                 ret = 0;
10755             }
10756 #else
10757             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10758 #endif
10759             if ((ret == 0) && put_user_s64(res, arg4)) {
10760                 return -TARGET_EFAULT;
10761             }
10762         }
10763         return ret;
10764 #endif
10765 #ifdef TARGET_NR_getdents
10766     case TARGET_NR_getdents:
10767         return do_getdents(arg1, arg2, arg3);
10768 #endif /* TARGET_NR_getdents */
10769 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10770     case TARGET_NR_getdents64:
10771         return do_getdents64(arg1, arg2, arg3);
10772 #endif /* TARGET_NR_getdents64 */
10773 #if defined(TARGET_NR__newselect)
10774     case TARGET_NR__newselect:
10775         return do_select(arg1, arg2, arg3, arg4, arg5);
10776 #endif
10777 #ifdef TARGET_NR_poll
10778     case TARGET_NR_poll:
10779         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10780 #endif
10781 #ifdef TARGET_NR_ppoll
10782     case TARGET_NR_ppoll:
10783         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10784 #endif
10785 #ifdef TARGET_NR_ppoll_time64
10786     case TARGET_NR_ppoll_time64:
10787         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10788 #endif
10789     case TARGET_NR_flock:
10790         /* NOTE: the flock constant seems to be the same for every
10791            Linux platform */
10792         return get_errno(safe_flock(arg1, arg2));
10793     case TARGET_NR_readv:
10794         {
10795             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10796             if (vec != NULL) {
10797                 ret = get_errno(safe_readv(arg1, vec, arg3));
10798                 unlock_iovec(vec, arg2, arg3, 1);
10799             } else {
10800                 ret = -host_to_target_errno(errno);
10801             }
10802         }
10803         return ret;
10804     case TARGET_NR_writev:
10805         {
10806             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10807             if (vec != NULL) {
10808                 ret = get_errno(safe_writev(arg1, vec, arg3));
10809                 unlock_iovec(vec, arg2, arg3, 0);
10810             } else {
10811                 ret = -host_to_target_errno(errno);
10812             }
10813         }
10814         return ret;
10815 #if defined(TARGET_NR_preadv)
10816     case TARGET_NR_preadv:
10817         {
10818             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10819             if (vec != NULL) {
10820                 unsigned long low, high;
10821 
10822                 target_to_host_low_high(arg4, arg5, &low, &high);
10823                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10824                 unlock_iovec(vec, arg2, arg3, 1);
10825             } else {
10826                 ret = -host_to_target_errno(errno);
10827             }
10828         }
10829         return ret;
10830 #endif
10831 #if defined(TARGET_NR_pwritev)
10832     case TARGET_NR_pwritev:
10833         {
10834             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10835             if (vec != NULL) {
10836                 unsigned long low, high;
10837 
10838                 target_to_host_low_high(arg4, arg5, &low, &high);
10839                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10840                 unlock_iovec(vec, arg2, arg3, 0);
10841             } else {
10842                 ret = -host_to_target_errno(errno);
10843             }
10844         }
10845         return ret;
10846 #endif
10847     case TARGET_NR_getsid:
10848         return get_errno(getsid(arg1));
10849 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10850     case TARGET_NR_fdatasync:
10851         return get_errno(fdatasync(arg1));
10852 #endif
10853     case TARGET_NR_sched_getaffinity:
10854         {
10855             unsigned int mask_size;
10856             unsigned long *mask;
10857 
10858             /*
10859              * sched_getaffinity needs multiples of ulong, so need to take
10860              * care of mismatches between target ulong and host ulong sizes.
10861              */
10862             if (arg2 & (sizeof(abi_ulong) - 1)) {
10863                 return -TARGET_EINVAL;
10864             }
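            /* Round the length up to a whole number of host longs, which is
             * the granularity the host kernel expects for the mask buffer.
             */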
10865             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10866 
10867             mask = alloca(mask_size);
10868             memset(mask, 0, mask_size);
10869             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10870 
10871             if (!is_error(ret)) {
10872                 if (ret > arg2) {
10873                     /* More data returned than the caller's buffer will fit.
10874                      * This only happens if sizeof(abi_long) < sizeof(long)
10875                      * and the caller passed us a buffer holding an odd number
10876                      * of abi_longs. If the host kernel is actually using the
10877                      * extra 4 bytes then fail EINVAL; otherwise we can just
10878                      * ignore them and only copy the interesting part.
10879                      */
10880                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10881                     if (numcpus > arg2 * 8) {
10882                         return -TARGET_EINVAL;
10883                     }
10884                     ret = arg2;
10885                 }
10886 
10887                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10888                     return -TARGET_EFAULT;
10889                 }
10890             }
10891         }
10892         return ret;
10893     case TARGET_NR_sched_setaffinity:
10894         {
10895             unsigned int mask_size;
10896             unsigned long *mask;
10897 
10898             /*
10899              * sched_setaffinity needs multiples of ulong, so need to take
10900              * care of mismatches between target ulong and host ulong sizes.
10901              */
10902             if (arg2 & (sizeof(abi_ulong) - 1)) {
10903                 return -TARGET_EINVAL;
10904             }
10905             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10906             mask = alloca(mask_size);
10907 
10908             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10909             if (ret) {
10910                 return ret;
10911             }
10912 
10913             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10914         }
10915     case TARGET_NR_getcpu:
10916         {
10917             unsigned cpu, node;
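            /* Either output pointer may be NULL when the caller is not
             * interested in that value, so fetch into locals first.
             */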
10918             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10919                                        arg2 ? &node : NULL,
10920                                        NULL));
10921             if (is_error(ret)) {
10922                 return ret;
10923             }
10924             if (arg1 && put_user_u32(cpu, arg1)) {
10925                 return -TARGET_EFAULT;
10926             }
10927             if (arg2 && put_user_u32(node, arg2)) {
10928                 return -TARGET_EFAULT;
10929             }
10930         }
10931         return ret;
10932     case TARGET_NR_sched_setparam:
10933         {
10934             struct target_sched_param *target_schp;
10935             struct sched_param schp;
10936 
10937             if (arg2 == 0) {
10938                 return -TARGET_EINVAL;
10939             }
10940             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10941                 return -TARGET_EFAULT;
10942             }
10943             schp.sched_priority = tswap32(target_schp->sched_priority);
10944             unlock_user_struct(target_schp, arg2, 0);
10945             return get_errno(sys_sched_setparam(arg1, &schp));
10946         }
10947     case TARGET_NR_sched_getparam:
10948         {
10949             struct target_sched_param *target_schp;
10950             struct sched_param schp;
10951 
10952             if (arg2 == 0) {
10953                 return -TARGET_EINVAL;
10954             }
10955             ret = get_errno(sys_sched_getparam(arg1, &schp));
10956             if (!is_error(ret)) {
10957                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10958                     return -TARGET_EFAULT;
10959                 }
10960                 target_schp->sched_priority = tswap32(schp.sched_priority);
10961                 unlock_user_struct(target_schp, arg2, 1);
10962             }
10963         }
10964         return ret;
10965     case TARGET_NR_sched_setscheduler:
10966         {
10967             struct target_sched_param *target_schp;
10968             struct sched_param schp;
10969             if (arg3 == 0) {
10970                 return -TARGET_EINVAL;
10971             }
10972             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10973                 return -TARGET_EFAULT;
10974             }
10975             schp.sched_priority = tswap32(target_schp->sched_priority);
10976             unlock_user_struct(target_schp, arg3, 0);
10977             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10978         }
10979     case TARGET_NR_sched_getscheduler:
10980         return get_errno(sys_sched_getscheduler(arg1));
10981     case TARGET_NR_sched_getattr:
10982         {
10983             struct target_sched_attr *target_scha;
10984             struct sched_attr scha;
10985             if (arg2 == 0) {
10986                 return -TARGET_EINVAL;
10987             }
10988             if (arg3 > sizeof(scha)) {
10989                 arg3 = sizeof(scha);
10990             }
10991             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10992             if (!is_error(ret)) {
10993                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10994                 if (!target_scha) {
10995                     return -TARGET_EFAULT;
10996                 }
10997                 target_scha->size = tswap32(scha.size);
10998                 target_scha->sched_policy = tswap32(scha.sched_policy);
10999                 target_scha->sched_flags = tswap64(scha.sched_flags);
11000                 target_scha->sched_nice = tswap32(scha.sched_nice);
11001                 target_scha->sched_priority = tswap32(scha.sched_priority);
11002                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11003                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11004                 target_scha->sched_period = tswap64(scha.sched_period);
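                /* The utilization clamp fields are only present when the
                 * kernel reported a structure large enough to contain them.
                 */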
11005                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11006                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11007                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11008                 }
11009                 unlock_user(target_scha, arg2, arg3);
11010             }
11011             return ret;
11012         }
11013     case TARGET_NR_sched_setattr:
11014         {
11015             struct target_sched_attr *target_scha;
11016             struct sched_attr scha;
11017             uint32_t size;
11018             int zeroed;
11019             if (arg2 == 0) {
11020                 return -TARGET_EINVAL;
11021             }
11022             if (get_user_u32(size, arg2)) {
11023                 return -TARGET_EFAULT;
11024             }
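            /* A size of zero selects the original layout, which ends just
             * before the utilization clamp fields.
             */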
11025             if (!size) {
11026                 size = offsetof(struct target_sched_attr, sched_util_min);
11027             }
11028             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11029                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11030                     return -TARGET_EFAULT;
11031                 }
11032                 return -TARGET_E2BIG;
11033             }
11034 
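            /* An attr larger than we know about is only accepted if all of
             * the extra bytes are zero, mirroring the kernel's own rule.
             */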
11035             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11036             if (zeroed < 0) {
11037                 return zeroed;
11038             } else if (zeroed == 0) {
11039                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11040                     return -TARGET_EFAULT;
11041                 }
11042                 return -TARGET_E2BIG;
11043             }
11044             if (size > sizeof(struct target_sched_attr)) {
11045                 size = sizeof(struct target_sched_attr);
11046             }
11047 
11048             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11049             if (!target_scha) {
11050                 return -TARGET_EFAULT;
11051             }
11052             scha.size = size;
11053             scha.sched_policy = tswap32(target_scha->sched_policy);
11054             scha.sched_flags = tswap64(target_scha->sched_flags);
11055             scha.sched_nice = tswap32(target_scha->sched_nice);
11056             scha.sched_priority = tswap32(target_scha->sched_priority);
11057             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11058             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11059             scha.sched_period = tswap64(target_scha->sched_period);
11060             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11061                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11062                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11063             }
11064             unlock_user(target_scha, arg2, 0);
11065             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11066         }
11067     case TARGET_NR_sched_yield:
11068         return get_errno(sched_yield());
11069     case TARGET_NR_sched_get_priority_max:
11070         return get_errno(sched_get_priority_max(arg1));
11071     case TARGET_NR_sched_get_priority_min:
11072         return get_errno(sched_get_priority_min(arg1));
11073 #ifdef TARGET_NR_sched_rr_get_interval
11074     case TARGET_NR_sched_rr_get_interval:
11075         {
11076             struct timespec ts;
11077             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11078             if (!is_error(ret)) {
11079                 ret = host_to_target_timespec(arg2, &ts);
11080             }
11081         }
11082         return ret;
11083 #endif
11084 #ifdef TARGET_NR_sched_rr_get_interval_time64
11085     case TARGET_NR_sched_rr_get_interval_time64:
11086         {
11087             struct timespec ts;
11088             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11089             if (!is_error(ret)) {
11090                 ret = host_to_target_timespec64(arg2, &ts);
11091             }
11092         }
11093         return ret;
11094 #endif
11095 #if defined(TARGET_NR_nanosleep)
11096     case TARGET_NR_nanosleep:
11097         {
11098             struct timespec req, rem;
11099             target_to_host_timespec(&req, arg1);
11100             ret = get_errno(safe_nanosleep(&req, &rem));
11101             if (is_error(ret) && arg2) {
11102                 host_to_target_timespec(arg2, &rem);
11103             }
11104         }
11105         return ret;
11106 #endif
11107     case TARGET_NR_prctl:
11108         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11110 #ifdef TARGET_NR_arch_prctl
11111     case TARGET_NR_arch_prctl:
11112         return do_arch_prctl(cpu_env, arg1, arg2);
11113 #endif
11114 #ifdef TARGET_NR_pread64
11115     case TARGET_NR_pread64:
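        /* Some 32-bit ABIs pass the 64-bit offset in an aligned register
         * pair, which shifts it up by one argument slot.
         */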
11116         if (regpairs_aligned(cpu_env, num)) {
11117             arg4 = arg5;
11118             arg5 = arg6;
11119         }
11120         if (arg2 == 0 && arg3 == 0) {
11121             /* Special-case NULL buffer and zero length, which should succeed */
11122             p = 0;
11123         } else {
11124             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11125             if (!p) {
11126                 return -TARGET_EFAULT;
11127             }
11128         }
11129         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11130         unlock_user(p, arg2, ret);
11131         return ret;
11132     case TARGET_NR_pwrite64:
11133         if (regpairs_aligned(cpu_env, num)) {
11134             arg4 = arg5;
11135             arg5 = arg6;
11136         }
11137         if (arg2 == 0 && arg3 == 0) {
11138             /* Special-case NULL buffer and zero length, which should succeed */
11139             p = 0;
11140         } else {
11141             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11142             if (!p) {
11143                 return -TARGET_EFAULT;
11144             }
11145         }
11146         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11147         unlock_user(p, arg2, 0);
11148         return ret;
11149 #endif
11150     case TARGET_NR_getcwd:
11151         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11152             return -TARGET_EFAULT;
11153         ret = get_errno(sys_getcwd1(p, arg2));
11154         unlock_user(p, arg1, ret);
11155         return ret;
11156     case TARGET_NR_capget:
11157     case TARGET_NR_capset:
11158     {
11159         struct target_user_cap_header *target_header;
11160         struct target_user_cap_data *target_data = NULL;
11161         struct __user_cap_header_struct header;
11162         struct __user_cap_data_struct data[2];
11163         struct __user_cap_data_struct *dataptr = NULL;
11164         int i, target_datalen;
11165         int data_items = 1;
11166 
11167         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11168             return -TARGET_EFAULT;
11169         }
11170         header.version = tswap32(target_header->version);
11171         header.pid = tswap32(target_header->pid);
11172 
11173         if (header.version != _LINUX_CAPABILITY_VERSION) {
11174             /* Versions 2 and up take a pointer to two user_data structs */
11175             data_items = 2;
11176         }
11177 
11178         target_datalen = sizeof(*target_data) * data_items;
11179 
11180         if (arg2) {
11181             if (num == TARGET_NR_capget) {
11182                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11183             } else {
11184                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11185             }
11186             if (!target_data) {
11187                 unlock_user_struct(target_header, arg1, 0);
11188                 return -TARGET_EFAULT;
11189             }
11190 
11191             if (num == TARGET_NR_capset) {
11192                 for (i = 0; i < data_items; i++) {
11193                     data[i].effective = tswap32(target_data[i].effective);
11194                     data[i].permitted = tswap32(target_data[i].permitted);
11195                     data[i].inheritable = tswap32(target_data[i].inheritable);
11196                 }
11197             }
11198 
11199             dataptr = data;
11200         }
11201 
11202         if (num == TARGET_NR_capget) {
11203             ret = get_errno(capget(&header, dataptr));
11204         } else {
11205             ret = get_errno(capset(&header, dataptr));
11206         }
11207 
11208         /* The kernel always updates version for both capget and capset */
11209         target_header->version = tswap32(header.version);
11210         unlock_user_struct(target_header, arg1, 1);
11211 
11212         if (arg2) {
11213             if (num == TARGET_NR_capget) {
11214                 for (i = 0; i < data_items; i++) {
11215                     target_data[i].effective = tswap32(data[i].effective);
11216                     target_data[i].permitted = tswap32(data[i].permitted);
11217                     target_data[i].inheritable = tswap32(data[i].inheritable);
11218                 }
11219                 unlock_user(target_data, arg2, target_datalen);
11220             } else {
11221                 unlock_user(target_data, arg2, 0);
11222             }
11223         }
11224         return ret;
11225     }
11226     case TARGET_NR_sigaltstack:
11227         return do_sigaltstack(arg1, arg2, cpu_env);
11228 
11229 #ifdef CONFIG_SENDFILE
11230 #ifdef TARGET_NR_sendfile
11231     case TARGET_NR_sendfile:
11232     {
11233         off_t *offp = NULL;
11234         off_t off;
11235         if (arg3) {
11236             ret = get_user_sal(off, arg3);
11237             if (is_error(ret)) {
11238                 return ret;
11239             }
11240             offp = &off;
11241         }
11242         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11243         if (!is_error(ret) && arg3) {
11244             abi_long ret2 = put_user_sal(off, arg3);
11245             if (is_error(ret2)) {
11246                 ret = ret2;
11247             }
11248         }
11249         return ret;
11250     }
11251 #endif
11252 #ifdef TARGET_NR_sendfile64
11253     case TARGET_NR_sendfile64:
11254     {
11255         off_t *offp = NULL;
11256         off_t off;
11257         if (arg3) {
11258             ret = get_user_s64(off, arg3);
11259             if (is_error(ret)) {
11260                 return ret;
11261             }
11262             offp = &off;
11263         }
11264         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11265         if (!is_error(ret) && arg3) {
11266             abi_long ret2 = put_user_s64(off, arg3);
11267             if (is_error(ret2)) {
11268                 ret = ret2;
11269             }
11270         }
11271         return ret;
11272     }
11273 #endif
11274 #endif
11275 #ifdef TARGET_NR_vfork
11276     case TARGET_NR_vfork:
11277         return get_errno(do_fork(cpu_env,
11278                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11279                          0, 0, 0, 0));
11280 #endif
11281 #ifdef TARGET_NR_ugetrlimit
11282     case TARGET_NR_ugetrlimit:
11283     {
11284         struct rlimit rlim;
11285         int resource = target_to_host_resource(arg1);
11286         ret = get_errno(getrlimit(resource, &rlim));
11287         if (!is_error(ret)) {
11288             struct target_rlimit *target_rlim;
11289             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11290                 return -TARGET_EFAULT;
11291             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11292             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11293             unlock_user_struct(target_rlim, arg2, 1);
11294         }
11295         return ret;
11296     }
11297 #endif
11298 #ifdef TARGET_NR_truncate64
11299     case TARGET_NR_truncate64:
11300         if (!(p = lock_user_string(arg1)))
11301             return -TARGET_EFAULT;
11302         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11303         unlock_user(p, arg1, 0);
11304         return ret;
11305 #endif
11306 #ifdef TARGET_NR_ftruncate64
11307     case TARGET_NR_ftruncate64:
11308         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11309 #endif
11310 #ifdef TARGET_NR_stat64
11311     case TARGET_NR_stat64:
11312         if (!(p = lock_user_string(arg1))) {
11313             return -TARGET_EFAULT;
11314         }
11315         ret = get_errno(stat(path(p), &st));
11316         unlock_user(p, arg1, 0);
11317         if (!is_error(ret))
11318             ret = host_to_target_stat64(cpu_env, arg2, &st);
11319         return ret;
11320 #endif
11321 #ifdef TARGET_NR_lstat64
11322     case TARGET_NR_lstat64:
11323         if (!(p = lock_user_string(arg1))) {
11324             return -TARGET_EFAULT;
11325         }
11326         ret = get_errno(lstat(path(p), &st));
11327         unlock_user(p, arg1, 0);
11328         if (!is_error(ret))
11329             ret = host_to_target_stat64(cpu_env, arg2, &st);
11330         return ret;
11331 #endif
11332 #ifdef TARGET_NR_fstat64
11333     case TARGET_NR_fstat64:
11334         ret = get_errno(fstat(arg1, &st));
11335         if (!is_error(ret))
11336             ret = host_to_target_stat64(cpu_env, arg2, &st);
11337         return ret;
11338 #endif
11339 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11340 #ifdef TARGET_NR_fstatat64
11341     case TARGET_NR_fstatat64:
11342 #endif
11343 #ifdef TARGET_NR_newfstatat
11344     case TARGET_NR_newfstatat:
11345 #endif
11346         if (!(p = lock_user_string(arg2))) {
11347             return -TARGET_EFAULT;
11348         }
11349         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11350         unlock_user(p, arg2, 0);
11351         if (!is_error(ret))
11352             ret = host_to_target_stat64(cpu_env, arg3, &st);
11353         return ret;
11354 #endif
11355 #if defined(TARGET_NR_statx)
11356     case TARGET_NR_statx:
11357         {
11358             struct target_statx *target_stx;
11359             int dirfd = arg1;
11360             int flags = arg3;
11361 
11362             p = lock_user_string(arg2);
11363             if (p == NULL) {
11364                 return -TARGET_EFAULT;
11365             }
11366 #if defined(__NR_statx)
11367             {
11368                 /*
11369                  * It is assumed that struct statx is architecture independent.
11370                  */
11371                 struct target_statx host_stx;
11372                 int mask = arg4;
11373 
11374                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11375                 if (!is_error(ret)) {
11376                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11377                         unlock_user(p, arg2, 0);
11378                         return -TARGET_EFAULT;
11379                     }
11380                 }
11381 
11382                 if (ret != -TARGET_ENOSYS) {
11383                     unlock_user(p, arg2, 0);
11384                     return ret;
11385                 }
11386             }
11387 #endif
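            /* No usable host statx() (or it returned ENOSYS): emulate it via
             * fstatat() and fill in the fields a plain struct stat provides.
             */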
11388             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11389             unlock_user(p, arg2, 0);
11390 
11391             if (!is_error(ret)) {
11392                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11393                     return -TARGET_EFAULT;
11394                 }
11395                 memset(target_stx, 0, sizeof(*target_stx));
11396                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11397                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11398                 __put_user(st.st_ino, &target_stx->stx_ino);
11399                 __put_user(st.st_mode, &target_stx->stx_mode);
11400                 __put_user(st.st_uid, &target_stx->stx_uid);
11401                 __put_user(st.st_gid, &target_stx->stx_gid);
11402                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11403                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11404                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11405                 __put_user(st.st_size, &target_stx->stx_size);
11406                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11407                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11408                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11409                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11410                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11411                 unlock_user_struct(target_stx, arg5, 1);
11412             }
11413         }
11414         return ret;
11415 #endif
11416 #ifdef TARGET_NR_lchown
11417     case TARGET_NR_lchown:
11418         if (!(p = lock_user_string(arg1)))
11419             return -TARGET_EFAULT;
11420         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11421         unlock_user(p, arg1, 0);
11422         return ret;
11423 #endif
11424 #ifdef TARGET_NR_getuid
11425     case TARGET_NR_getuid:
11426         return get_errno(high2lowuid(getuid()));
11427 #endif
11428 #ifdef TARGET_NR_getgid
11429     case TARGET_NR_getgid:
11430         return get_errno(high2lowgid(getgid()));
11431 #endif
11432 #ifdef TARGET_NR_geteuid
11433     case TARGET_NR_geteuid:
11434         return get_errno(high2lowuid(geteuid()));
11435 #endif
11436 #ifdef TARGET_NR_getegid
11437     case TARGET_NR_getegid:
11438         return get_errno(high2lowgid(getegid()));
11439 #endif
11440     case TARGET_NR_setreuid:
11441         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11442     case TARGET_NR_setregid:
11443         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11444     case TARGET_NR_getgroups:
11445         {
11446             int gidsetsize = arg1;
11447             target_id *target_grouplist;
11448             gid_t *grouplist;
11449             int i;
11450 
11451             grouplist = alloca(gidsetsize * sizeof(gid_t));
11452             ret = get_errno(getgroups(gidsetsize, grouplist));
11453             if (gidsetsize == 0)
11454                 return ret;
11455             if (!is_error(ret)) {
11456                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11457                 if (!target_grouplist)
11458                     return -TARGET_EFAULT;
11459                 for (i = 0; i < ret; i++)
11460                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11461                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11462             }
11463         }
11464         return ret;
11465     case TARGET_NR_setgroups:
11466         {
11467             int gidsetsize = arg1;
11468             target_id *target_grouplist;
11469             gid_t *grouplist = NULL;
11470             int i;
11471             if (gidsetsize) {
11472                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11473                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11474                 if (!target_grouplist) {
11475                     return -TARGET_EFAULT;
11476                 }
11477                 for (i = 0; i < gidsetsize; i++) {
11478                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11479                 }
11480                 unlock_user(target_grouplist, arg2, 0);
11481             }
11482             return get_errno(setgroups(gidsetsize, grouplist));
11483         }
11484     case TARGET_NR_fchown:
11485         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11486 #if defined(TARGET_NR_fchownat)
11487     case TARGET_NR_fchownat:
11488         if (!(p = lock_user_string(arg2)))
11489             return -TARGET_EFAULT;
11490         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11491                                  low2highgid(arg4), arg5));
11492         unlock_user(p, arg2, 0);
11493         return ret;
11494 #endif
11495 #ifdef TARGET_NR_setresuid
11496     case TARGET_NR_setresuid:
11497         return get_errno(sys_setresuid(low2highuid(arg1),
11498                                        low2highuid(arg2),
11499                                        low2highuid(arg3)));
11500 #endif
11501 #ifdef TARGET_NR_getresuid
11502     case TARGET_NR_getresuid:
11503         {
11504             uid_t ruid, euid, suid;
11505             ret = get_errno(getresuid(&ruid, &euid, &suid));
11506             if (!is_error(ret)) {
11507                 if (put_user_id(high2lowuid(ruid), arg1)
11508                     || put_user_id(high2lowuid(euid), arg2)
11509                     || put_user_id(high2lowuid(suid), arg3))
11510                     return -TARGET_EFAULT;
11511             }
11512         }
11513         return ret;
11514 #endif
11515 #ifdef TARGET_NR_getresgid
11516     case TARGET_NR_setresgid:
11517         return get_errno(sys_setresgid(low2highgid(arg1),
11518                                        low2highgid(arg2),
11519                                        low2highgid(arg3)));
11520 #endif
11521 #ifdef TARGET_NR_getresgid
11522     case TARGET_NR_getresgid:
11523         {
11524             gid_t rgid, egid, sgid;
11525             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11526             if (!is_error(ret)) {
11527                 if (put_user_id(high2lowgid(rgid), arg1)
11528                     || put_user_id(high2lowgid(egid), arg2)
11529                     || put_user_id(high2lowgid(sgid), arg3))
11530                     return -TARGET_EFAULT;
11531             }
11532         }
11533         return ret;
11534 #endif
11535 #ifdef TARGET_NR_chown
11536     case TARGET_NR_chown:
11537         if (!(p = lock_user_string(arg1)))
11538             return -TARGET_EFAULT;
11539         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11540         unlock_user(p, arg1, 0);
11541         return ret;
11542 #endif
11543     case TARGET_NR_setuid:
11544         return get_errno(sys_setuid(low2highuid(arg1)));
11545     case TARGET_NR_setgid:
11546         return get_errno(sys_setgid(low2highgid(arg1)));
11547     case TARGET_NR_setfsuid:
11548         return get_errno(setfsuid(arg1));
11549     case TARGET_NR_setfsgid:
11550         return get_errno(setfsgid(arg1));
11551 
11552 #ifdef TARGET_NR_lchown32
11553     case TARGET_NR_lchown32:
11554         if (!(p = lock_user_string(arg1)))
11555             return -TARGET_EFAULT;
11556         ret = get_errno(lchown(p, arg2, arg3));
11557         unlock_user(p, arg1, 0);
11558         return ret;
11559 #endif
11560 #ifdef TARGET_NR_getuid32
11561     case TARGET_NR_getuid32:
11562         return get_errno(getuid());
11563 #endif
11564 
11565 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11566     /* Alpha specific */
11567     case TARGET_NR_getxuid:
11568         {
11569             uid_t euid;
11570             euid = geteuid();
11571             cpu_env->ir[IR_A4] = euid;
11572         }
11573         return get_errno(getuid());
11574 #endif
11575 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11576     /* Alpha specific */
11577     case TARGET_NR_getxgid:
11578         {
11579             gid_t egid;
11580             egid = getegid();
11581             cpu_env->ir[IR_A4] = egid;
11582         }
11583         return get_errno(getgid());
11584 #endif
11584 #endif
11585 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11586     /* Alpha specific */
11587     case TARGET_NR_osf_getsysinfo:
11588         ret = -TARGET_EOPNOTSUPP;
11589         switch (arg1) {
11590           case TARGET_GSI_IEEE_FP_CONTROL:
11591             {
11592                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11593                 uint64_t swcr = cpu_env->swcr;
11594 
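                /* Merge the live status bits from the hardware FPCR into the
                 * saved software control word before reporting it.
                 */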
11595                 swcr &= ~SWCR_STATUS_MASK;
11596                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11597 
11598                 if (put_user_u64(swcr, arg2))
11599                     return -TARGET_EFAULT;
11600                 ret = 0;
11601             }
11602             break;
11603 
11604           /* case GSI_IEEE_STATE_AT_SIGNAL:
11605              -- Not implemented in linux kernel.
11606              case GSI_UACPROC:
11607              -- Retrieves current unaligned access state; not much used.
11608              case GSI_PROC_TYPE:
11609              -- Retrieves implver information; surely not used.
11610              case GSI_GET_HWRPB:
11611              -- Grabs a copy of the HWRPB; surely not used.
11612           */
11613         }
11614         return ret;
11615 #endif
11616 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11617     /* Alpha specific */
11618     case TARGET_NR_osf_setsysinfo:
11619         ret = -TARGET_EOPNOTSUPP;
11620         switch (arg1) {
11621           case TARGET_SSI_IEEE_FP_CONTROL:
11622             {
11623                 uint64_t swcr, fpcr;
11624 
11625                 if (get_user_u64 (swcr, arg2)) {
11626                     return -TARGET_EFAULT;
11627                 }
11628 
11629                 /*
11630                  * The kernel calls swcr_update_status to update the
11631                  * status bits from the fpcr at every point that it
11632                  * could be queried.  Therefore, we store the status
11633                  * bits only in FPCR.
11634                  */
11635                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11636 
11637                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11638                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11639                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11640                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11641                 ret = 0;
11642             }
11643             break;
11644 
11645           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11646             {
11647                 uint64_t exc, fpcr, fex;
11648 
11649                 if (get_user_u64(exc, arg2)) {
11650                     return -TARGET_EFAULT;
11651                 }
11652                 exc &= SWCR_STATUS_MASK;
11653                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11654 
11655                 /* Old exceptions are not signaled.  */
11656                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11657                 fex = exc & ~fex;
11658                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11659                 fex &= (cpu_env)->swcr;
11660 
11661                 /* Update the hardware fpcr.  */
11662                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11663                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11664 
11665                 if (fex) {
11666                     int si_code = TARGET_FPE_FLTUNK;
11667                     target_siginfo_t info;
11668 
11669                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11670                         si_code = TARGET_FPE_FLTUND;
11671                     }
11672                     if (fex & SWCR_TRAP_ENABLE_INE) {
11673                         si_code = TARGET_FPE_FLTRES;
11674                     }
11675                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11676                         si_code = TARGET_FPE_FLTUND;
11677                     }
11678                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11679                         si_code = TARGET_FPE_FLTOVF;
11680                     }
11681                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11682                         si_code = TARGET_FPE_FLTDIV;
11683                     }
11684                     if (fex & SWCR_TRAP_ENABLE_INV) {
11685                         si_code = TARGET_FPE_FLTINV;
11686                     }
11687 
11688                     info.si_signo = SIGFPE;
11689                     info.si_errno = 0;
11690                     info.si_code = si_code;
11691                     info._sifields._sigfault._addr = (cpu_env)->pc;
11692                     queue_signal(cpu_env, info.si_signo,
11693                                  QEMU_SI_FAULT, &info);
11694                 }
11695                 ret = 0;
11696             }
11697             break;
11698 
11699           /* case SSI_NVPAIRS:
11700              -- Used with SSIN_UACPROC to enable unaligned accesses.
11701              case SSI_IEEE_STATE_AT_SIGNAL:
11702              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11703              -- Not implemented in linux kernel
11704           */
11705         }
11706         return ret;
11707 #endif
11708 #ifdef TARGET_NR_osf_sigprocmask
11709     /* Alpha specific.  */
11710     case TARGET_NR_osf_sigprocmask:
11711         {
11712             abi_ulong mask;
11713             int how;
11714             sigset_t set, oldset;
11715 
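            /* Unlike rt_sigprocmask, this OSF variant returns the old mask
             * in the syscall result rather than through a pointer.
             */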
11716             switch(arg1) {
11717             case TARGET_SIG_BLOCK:
11718                 how = SIG_BLOCK;
11719                 break;
11720             case TARGET_SIG_UNBLOCK:
11721                 how = SIG_UNBLOCK;
11722                 break;
11723             case TARGET_SIG_SETMASK:
11724                 how = SIG_SETMASK;
11725                 break;
11726             default:
11727                 return -TARGET_EINVAL;
11728             }
11729             mask = arg2;
11730             target_to_host_old_sigset(&set, &mask);
11731             ret = do_sigprocmask(how, &set, &oldset);
11732             if (!ret) {
11733                 host_to_target_old_sigset(&mask, &oldset);
11734                 ret = mask;
11735             }
11736         }
11737         return ret;
11738 #endif
11739 
11740 #ifdef TARGET_NR_getgid32
11741     case TARGET_NR_getgid32:
11742         return get_errno(getgid());
11743 #endif
11744 #ifdef TARGET_NR_geteuid32
11745     case TARGET_NR_geteuid32:
11746         return get_errno(geteuid());
11747 #endif
11748 #ifdef TARGET_NR_getegid32
11749     case TARGET_NR_getegid32:
11750         return get_errno(getegid());
11751 #endif
11752 #ifdef TARGET_NR_setreuid32
11753     case TARGET_NR_setreuid32:
11754         return get_errno(setreuid(arg1, arg2));
11755 #endif
11756 #ifdef TARGET_NR_setregid32
11757     case TARGET_NR_setregid32:
11758         return get_errno(setregid(arg1, arg2));
11759 #endif
11760 #ifdef TARGET_NR_getgroups32
11761     case TARGET_NR_getgroups32:
11762         {
11763             int gidsetsize = arg1;
11764             uint32_t *target_grouplist;
11765             gid_t *grouplist;
11766             int i;
11767 
11768             grouplist = alloca(gidsetsize * sizeof(gid_t));
11769             ret = get_errno(getgroups(gidsetsize, grouplist));
11770             if (gidsetsize == 0)
11771                 return ret;
11772             if (!is_error(ret)) {
11773                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11774                 if (!target_grouplist) {
11775                     return -TARGET_EFAULT;
11776                 }
11777                 for (i = 0; i < ret; i++)
11778                     target_grouplist[i] = tswap32(grouplist[i]);
11779                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11780             }
11781         }
11782         return ret;
11783 #endif
11784 #ifdef TARGET_NR_setgroups32
11785     case TARGET_NR_setgroups32:
11786         {
11787             int gidsetsize = arg1;
11788             uint32_t *target_grouplist;
11789             gid_t *grouplist;
11790             int i;
11791 
11792             grouplist = alloca(gidsetsize * sizeof(gid_t));
11793             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11794             if (!target_grouplist) {
11795                 return -TARGET_EFAULT;
11796             }
11797             for (i = 0; i < gidsetsize; i++)
11798                 grouplist[i] = tswap32(target_grouplist[i]);
11799             unlock_user(target_grouplist, arg2, 0);
11800             return get_errno(setgroups(gidsetsize, grouplist));
11801         }
11802 #endif
11803 #ifdef TARGET_NR_fchown32
11804     case TARGET_NR_fchown32:
11805         return get_errno(fchown(arg1, arg2, arg3));
11806 #endif
11807 #ifdef TARGET_NR_setresuid32
11808     case TARGET_NR_setresuid32:
11809         return get_errno(sys_setresuid(arg1, arg2, arg3));
11810 #endif
11811 #ifdef TARGET_NR_getresuid32
11812     case TARGET_NR_getresuid32:
11813         {
11814             uid_t ruid, euid, suid;
11815             ret = get_errno(getresuid(&ruid, &euid, &suid));
11816             if (!is_error(ret)) {
11817                 if (put_user_u32(ruid, arg1)
11818                     || put_user_u32(euid, arg2)
11819                     || put_user_u32(suid, arg3))
11820                     return -TARGET_EFAULT;
11821             }
11822         }
11823         return ret;
11824 #endif
11825 #ifdef TARGET_NR_setresgid32
11826     case TARGET_NR_setresgid32:
11827         return get_errno(sys_setresgid(arg1, arg2, arg3));
11828 #endif
11829 #ifdef TARGET_NR_getresgid32
11830     case TARGET_NR_getresgid32:
11831         {
11832             gid_t rgid, egid, sgid;
11833             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11834             if (!is_error(ret)) {
11835                 if (put_user_u32(rgid, arg1)
11836                     || put_user_u32(egid, arg2)
11837                     || put_user_u32(sgid, arg3))
11838                     return -TARGET_EFAULT;
11839             }
11840         }
11841         return ret;
11842 #endif
11843 #ifdef TARGET_NR_chown32
11844     case TARGET_NR_chown32:
11845         if (!(p = lock_user_string(arg1)))
11846             return -TARGET_EFAULT;
11847         ret = get_errno(chown(p, arg2, arg3));
11848         unlock_user(p, arg1, 0);
11849         return ret;
11850 #endif
11851 #ifdef TARGET_NR_setuid32
11852     case TARGET_NR_setuid32:
11853         return get_errno(sys_setuid(arg1));
11854 #endif
11855 #ifdef TARGET_NR_setgid32
11856     case TARGET_NR_setgid32:
11857         return get_errno(sys_setgid(arg1));
11858 #endif
11859 #ifdef TARGET_NR_setfsuid32
11860     case TARGET_NR_setfsuid32:
11861         return get_errno(setfsuid(arg1));
11862 #endif
11863 #ifdef TARGET_NR_setfsgid32
11864     case TARGET_NR_setfsgid32:
11865         return get_errno(setfsgid(arg1));
11866 #endif
11867 #ifdef TARGET_NR_mincore
11868     case TARGET_NR_mincore:
11869         {
11870             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
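            /* mincore() reports ENOMEM rather than EFAULT for an unmapped
             * range, hence the distinct error when the region can't be locked.
             */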
11871             if (!a) {
11872                 return -TARGET_ENOMEM;
11873             }
11874             p = lock_user_string(arg3);
11875             if (!p) {
11876                 ret = -TARGET_EFAULT;
11877             } else {
11878                 ret = get_errno(mincore(a, arg2, p));
11879                 unlock_user(p, arg3, ret);
11880             }
11881             unlock_user(a, arg1, 0);
11882         }
11883         return ret;
11884 #endif
11885 #ifdef TARGET_NR_arm_fadvise64_64
11886     case TARGET_NR_arm_fadvise64_64:
11887         /* arm_fadvise64_64 looks like fadvise64_64 but
11888          * with different argument order: fd, advice, offset, len
11889          * rather than the usual fd, offset, len, advice.
11890          * Note that offset and len are both 64-bit so appear as
11891          * pairs of 32-bit registers.
11892          */
11893         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11894                             target_offset64(arg5, arg6), arg2);
11895         return -host_to_target_errno(ret);
11896 #endif
11897 
11898 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11899 
11900 #ifdef TARGET_NR_fadvise64_64
11901     case TARGET_NR_fadvise64_64:
11902 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11903         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11904         ret = arg2;
11905         arg2 = arg3;
11906         arg3 = arg4;
11907         arg4 = arg5;
11908         arg5 = arg6;
11909         arg6 = ret;
11910 #else
11911         /* 6 args: fd, offset (high, low), len (high, low), advice */
11912         if (regpairs_aligned(cpu_env, num)) {
11913             /* offset is in (3,4), len in (5,6) and advice in 7 */
11914             arg2 = arg3;
11915             arg3 = arg4;
11916             arg4 = arg5;
11917             arg5 = arg6;
11918             arg6 = arg7;
11919         }
11920 #endif
11921         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11922                             target_offset64(arg4, arg5), arg6);
11923         return -host_to_target_errno(ret);
11924 #endif
11925 
11926 #ifdef TARGET_NR_fadvise64
11927     case TARGET_NR_fadvise64:
11928         /* 5 args: fd, offset (high, low), len, advice */
11929         if (regpairs_aligned(cpu_env, num)) {
11930             /* offset is in (3,4), len in 5 and advice in 6 */
11931             arg2 = arg3;
11932             arg3 = arg4;
11933             arg4 = arg5;
11934             arg5 = arg6;
11935         }
11936         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11937         return -host_to_target_errno(ret);
11938 #endif
11939 
11940 #else /* not a 32-bit ABI */
11941 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11942 #ifdef TARGET_NR_fadvise64_64
11943     case TARGET_NR_fadvise64_64:
11944 #endif
11945 #ifdef TARGET_NR_fadvise64
11946     case TARGET_NR_fadvise64:
11947 #endif
11948 #ifdef TARGET_S390X
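        /* s390x numbers POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as 6 and 7;
         * remap those, and turn the (unused on s390x) values 4 and 5 into
         * something the host will reject.
         */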
11949         switch (arg4) {
11950         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11951         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11952         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11953         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11954         default: break;
11955         }
11956 #endif
11957         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11958 #endif
11959 #endif /* end of 64-bit ABI fadvise handling */
11960 
11961 #ifdef TARGET_NR_madvise
11962     case TARGET_NR_madvise:
11963         return target_madvise(arg1, arg2, arg3);
11964 #endif
11965 #ifdef TARGET_NR_fcntl64
11966     case TARGET_NR_fcntl64:
11967     {
11968         int cmd;
11969         struct flock64 fl;
11970         from_flock64_fn *copyfrom = copy_from_user_flock64;
11971         to_flock64_fn *copyto = copy_to_user_flock64;
11972 
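        /* The old ARM OABI lays out struct flock64 without the alignment
         * padding EABI inserts, so it needs its own conversion helpers.
         */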
11973 #ifdef TARGET_ARM
11974         if (!cpu_env->eabi) {
11975             copyfrom = copy_from_user_oabi_flock64;
11976             copyto = copy_to_user_oabi_flock64;
11977         }
11978 #endif
11979 
11980         cmd = target_to_host_fcntl_cmd(arg2);
11981         if (cmd == -TARGET_EINVAL) {
11982             return cmd;
11983         }
11984 
11985         switch(arg2) {
11986         case TARGET_F_GETLK64:
11987             ret = copyfrom(&fl, arg3);
11988             if (ret) {
11989                 break;
11990             }
11991             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11992             if (ret == 0) {
11993                 ret = copyto(arg3, &fl);
11994             }
11995             break;
11996 
11997         case TARGET_F_SETLK64:
11998         case TARGET_F_SETLKW64:
11999             ret = copyfrom(&fl, arg3);
12000             if (ret) {
12001                 break;
12002             }
12003             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12004             break;
12005         default:
12006             ret = do_fcntl(arg1, arg2, arg3);
12007             break;
12008         }
12009         return ret;
12010     }
12011 #endif
12012 #ifdef TARGET_NR_cacheflush
12013     case TARGET_NR_cacheflush:
12014         /* self-modifying code is handled automatically, so nothing needed */
12015         return 0;
12016 #endif
12017 #ifdef TARGET_NR_getpagesize
12018     case TARGET_NR_getpagesize:
12019         return TARGET_PAGE_SIZE;
12020 #endif
12021     case TARGET_NR_gettid:
12022         return get_errno(sys_gettid());
12023 #ifdef TARGET_NR_readahead
12024     case TARGET_NR_readahead:
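        /* On 32-bit ABIs the 64-bit offset arrives in a register pair, which
         * some ABIs require to start on an even-numbered register.
         */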
12025 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12026         if (regpairs_aligned(cpu_env, num)) {
12027             arg2 = arg3;
12028             arg3 = arg4;
12029             arg4 = arg5;
12030         }
12031         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12032 #else
12033         ret = get_errno(readahead(arg1, arg2, arg3));
12034 #endif
12035         return ret;
12036 #endif
12037 #ifdef CONFIG_ATTR
12038 #ifdef TARGET_NR_setxattr
12039     case TARGET_NR_listxattr:
12040     case TARGET_NR_llistxattr:
12041     {
12042         void *p, *b = 0;
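        /* When the target passes a NULL buffer (the usual size-query pattern),
         * hand NULL straight through to the host call.
         */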
12043         if (arg2) {
12044             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12045             if (!b) {
12046                 return -TARGET_EFAULT;
12047             }
12048         }
12049         p = lock_user_string(arg1);
12050         if (p) {
12051             if (num == TARGET_NR_listxattr) {
12052                 ret = get_errno(listxattr(p, b, arg3));
12053             } else {
12054                 ret = get_errno(llistxattr(p, b, arg3));
12055             }
12056         } else {
12057             ret = -TARGET_EFAULT;
12058         }
12059         unlock_user(p, arg1, 0);
12060         unlock_user(b, arg2, arg3);
12061         return ret;
12062     }
12063     case TARGET_NR_flistxattr:
12064     {
12065         void *b = 0;
12066         if (arg2) {
12067             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12068             if (!b) {
12069                 return -TARGET_EFAULT;
12070             }
12071         }
12072         ret = get_errno(flistxattr(arg1, b, arg3));
12073         unlock_user(b, arg2, arg3);
12074         return ret;
12075     }
12076     case TARGET_NR_setxattr:
12077     case TARGET_NR_lsetxattr:
12078         {
12079             void *p, *n, *v = 0;
12080             if (arg3) {
12081                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12082                 if (!v) {
12083                     return -TARGET_EFAULT;
12084                 }
12085             }
12086             p = lock_user_string(arg1);
12087             n = lock_user_string(arg2);
12088             if (p && n) {
12089                 if (num == TARGET_NR_setxattr) {
12090                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12091                 } else {
12092                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12093                 }
12094             } else {
12095                 ret = -TARGET_EFAULT;
12096             }
12097             unlock_user(p, arg1, 0);
12098             unlock_user(n, arg2, 0);
12099             unlock_user(v, arg3, 0);
12100         }
12101         return ret;
12102     case TARGET_NR_fsetxattr:
12103         {
12104             void *n, *v = 0;
12105             if (arg3) {
12106                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12107                 if (!v) {
12108                     return -TARGET_EFAULT;
12109                 }
12110             }
12111             n = lock_user_string(arg2);
12112             if (n) {
12113                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12114             } else {
12115                 ret = -TARGET_EFAULT;
12116             }
12117             unlock_user(n, arg2, 0);
12118             unlock_user(v, arg3, 0);
12119         }
12120         return ret;
12121     case TARGET_NR_getxattr:
12122     case TARGET_NR_lgetxattr:
12123         {
12124             void *p, *n, *v = 0;
12125             if (arg3) {
12126                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12127                 if (!v) {
12128                     return -TARGET_EFAULT;
12129                 }
12130             }
12131             p = lock_user_string(arg1);
12132             n = lock_user_string(arg2);
12133             if (p && n) {
12134                 if (num == TARGET_NR_getxattr) {
12135                     ret = get_errno(getxattr(p, n, v, arg4));
12136                 } else {
12137                     ret = get_errno(lgetxattr(p, n, v, arg4));
12138                 }
12139             } else {
12140                 ret = -TARGET_EFAULT;
12141             }
12142             unlock_user(p, arg1, 0);
12143             unlock_user(n, arg2, 0);
12144             unlock_user(v, arg3, arg4);
12145         }
12146         return ret;
12147     case TARGET_NR_fgetxattr:
12148         {
12149             void *n, *v = 0;
12150             if (arg3) {
12151                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12152                 if (!v) {
12153                     return -TARGET_EFAULT;
12154                 }
12155             }
12156             n = lock_user_string(arg2);
12157             if (n) {
12158                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12159             } else {
12160                 ret = -TARGET_EFAULT;
12161             }
12162             unlock_user(n, arg2, 0);
12163             unlock_user(v, arg3, arg4);
12164         }
12165         return ret;
12166     case TARGET_NR_removexattr:
12167     case TARGET_NR_lremovexattr:
12168         {
12169             void *p, *n;
12170             p = lock_user_string(arg1);
12171             n = lock_user_string(arg2);
12172             if (p && n) {
12173                 if (num == TARGET_NR_removexattr) {
12174                     ret = get_errno(removexattr(p, n));
12175                 } else {
12176                     ret = get_errno(lremovexattr(p, n));
12177                 }
12178             } else {
12179                 ret = -TARGET_EFAULT;
12180             }
12181             unlock_user(p, arg1, 0);
12182             unlock_user(n, arg2, 0);
12183         }
12184         return ret;
12185     case TARGET_NR_fremovexattr:
12186         {
12187             void *n;
12188             n = lock_user_string(arg2);
12189             if (n) {
12190                 ret = get_errno(fremovexattr(arg1, n));
12191             } else {
12192                 ret = -TARGET_EFAULT;
12193             }
12194             unlock_user(n, arg2, 0);
12195         }
12196         return ret;
12197 #endif
12198 #endif /* CONFIG_ATTR */
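          /*
           * set_thread_area/get_thread_area establish and query the per-thread
           * TLS pointer.  The mechanism is architecture specific: CP0 UserLocal
           * on MIPS, the PR_PID special register on CRIS, a GDT entry on 32-bit
           * x86 and TaskState::tp_value on m68k; other targets get ENOSYS.
           */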
12199 #ifdef TARGET_NR_set_thread_area
12200     case TARGET_NR_set_thread_area:
12201 #if defined(TARGET_MIPS)
12202       cpu_env->active_tc.CP0_UserLocal = arg1;
12203       return 0;
12204 #elif defined(TARGET_CRIS)
12205       if (arg1 & 0xff)
12206           ret = -TARGET_EINVAL;
12207       else {
12208           cpu_env->pregs[PR_PID] = arg1;
12209           ret = 0;
12210       }
12211       return ret;
12212 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12213       return do_set_thread_area(cpu_env, arg1);
12214 #elif defined(TARGET_M68K)
12215       {
12216           TaskState *ts = cpu->opaque;
12217           ts->tp_value = arg1;
12218           return 0;
12219       }
12220 #else
12221       return -TARGET_ENOSYS;
12222 #endif
12223 #endif
12224 #ifdef TARGET_NR_get_thread_area
12225     case TARGET_NR_get_thread_area:
12226 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12227         return do_get_thread_area(cpu_env, arg1);
12228 #elif defined(TARGET_M68K)
12229         {
12230             TaskState *ts = cpu->opaque;
12231             return ts->tp_value;
12232         }
12233 #else
12234         return -TARGET_ENOSYS;
12235 #endif
12236 #endif
12237 #ifdef TARGET_NR_getdomainname
12238     case TARGET_NR_getdomainname:
12239         return -TARGET_ENOSYS;
12240 #endif
12241 
12242 #ifdef TARGET_NR_clock_settime
12243     case TARGET_NR_clock_settime:
12244     {
12245         struct timespec ts;
12246 
12247         ret = target_to_host_timespec(&ts, arg2);
12248         if (!is_error(ret)) {
12249             ret = get_errno(clock_settime(arg1, &ts));
12250         }
12251         return ret;
12252     }
12253 #endif
12254 #ifdef TARGET_NR_clock_settime64
12255     case TARGET_NR_clock_settime64:
12256     {
12257         struct timespec ts;
12258 
12259         ret = target_to_host_timespec64(&ts, arg2);
12260         if (!is_error(ret)) {
12261             ret = get_errno(clock_settime(arg1, &ts));
12262         }
12263         return ret;
12264     }
12265 #endif
12266 #ifdef TARGET_NR_clock_gettime
12267     case TARGET_NR_clock_gettime:
12268     {
12269         struct timespec ts;
12270         ret = get_errno(clock_gettime(arg1, &ts));
12271         if (!is_error(ret)) {
12272             ret = host_to_target_timespec(arg2, &ts);
12273         }
12274         return ret;
12275     }
12276 #endif
12277 #ifdef TARGET_NR_clock_gettime64
12278     case TARGET_NR_clock_gettime64:
12279     {
12280         struct timespec ts;
12281         ret = get_errno(clock_gettime(arg1, &ts));
12282         if (!is_error(ret)) {
12283             ret = host_to_target_timespec64(arg2, &ts);
12284         }
12285         return ret;
12286     }
12287 #endif
12288 #ifdef TARGET_NR_clock_getres
12289     case TARGET_NR_clock_getres:
12290     {
12291         struct timespec ts;
12292         ret = get_errno(clock_getres(arg1, &ts));
12293         if (!is_error(ret)) {
12294             host_to_target_timespec(arg2, &ts);
12295         }
12296         return ret;
12297     }
12298 #endif
12299 #ifdef TARGET_NR_clock_getres_time64
12300     case TARGET_NR_clock_getres_time64:
12301     {
12302         struct timespec ts;
12303         ret = get_errno(clock_getres(arg1, &ts));
12304         if (!is_error(ret)) {
12305             host_to_target_timespec64(arg2, &ts);
12306         }
12307         return ret;
12308     }
12309 #endif
12310 #ifdef TARGET_NR_clock_nanosleep
12311     case TARGET_NR_clock_nanosleep:
12312     {
12313         struct timespec ts;
12314         if (target_to_host_timespec(&ts, arg3)) {
12315             return -TARGET_EFAULT;
12316         }
12317         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12318                                              &ts, arg4 ? &ts : NULL));
12319         /*
12320          * If the call is interrupted by a signal handler, it fails with
12321          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12322          * the remaining unslept time is written back to arg4.
12323          */
12324         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12325             host_to_target_timespec(arg4, &ts)) {
12326               return -TARGET_EFAULT;
12327         }
12328 
12329         return ret;
12330     }
12331 #endif
12332 #ifdef TARGET_NR_clock_nanosleep_time64
12333     case TARGET_NR_clock_nanosleep_time64:
12334     {
12335         struct timespec ts;
12336 
12337         if (target_to_host_timespec64(&ts, arg3)) {
12338             return -TARGET_EFAULT;
12339         }
12340 
12341         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12342                                              &ts, arg4 ? &ts : NULL));
12343 
12344         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12345             host_to_target_timespec64(arg4, &ts)) {
12346             return -TARGET_EFAULT;
12347         }
12348         return ret;
12349     }
12350 #endif
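          /*
           * The clock_* cases above only need the timespec converted between
           * target and host layouts; a guest call such as (hypothetical
           * example, not taken from any particular guest)
           *     struct timespec req = { .tv_sec = 1 }, rem;
           *     clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem);
           * maps directly onto the corresponding host call.
           */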
12351 
12352 #if defined(TARGET_NR_set_tid_address)
12353     case TARGET_NR_set_tid_address:
12354     {
12355         TaskState *ts = cpu->opaque;
12356         ts->child_tidptr = arg1;
12357         /* Do not call the host set_tid_address() syscall; just record the pointer and return the thread id. */
12358         return get_errno(sys_gettid());
12359     }
12360 #endif
12361 
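          /*
           * tkill/tgkill deliver a signal to a single thread; only the guest
           * signal number needs translating (target_to_host_signal) before
           * the host syscall is issued.
           */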
12362     case TARGET_NR_tkill:
12363         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12364 
12365     case TARGET_NR_tgkill:
12366         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12367                          target_to_host_signal(arg3)));
12368 
12369 #ifdef TARGET_NR_set_robust_list
12370     case TARGET_NR_set_robust_list:
12371     case TARGET_NR_get_robust_list:
12372         /* The ABI for supporting robust futexes has userspace pass
12373          * the kernel a pointer to a linked list which is updated by
12374          * userspace after the syscall; the list is walked by the kernel
12375          * when the thread exits. Since the linked list in QEMU guest
12376          * memory isn't a valid linked list for the host and we have
12377          * no way to reliably intercept the thread-death event, we can't
12378          * support these. Silently return ENOSYS so that guest userspace
12379          * falls back to a non-robust futex implementation (which should
12380          * be OK except in the corner case of the guest crashing while
12381          * holding a mutex that is shared with another process via
12382          * shared memory).
12383          */
12384         return -TARGET_ENOSYS;
12385 #endif
12386 
12387 #if defined(TARGET_NR_utimensat)
12388     case TARGET_NR_utimensat:
12389         {
12390             struct timespec *tsp, ts[2];
12391             if (!arg3) {
12392                 tsp = NULL;
12393             } else {
12394                 if (target_to_host_timespec(ts, arg3)) {
12395                     return -TARGET_EFAULT;
12396                 }
12397                 if (target_to_host_timespec(ts + 1, arg3 +
12398                                             sizeof(struct target_timespec))) {
12399                     return -TARGET_EFAULT;
12400                 }
12401                 tsp = ts;
12402             }
12403             if (!arg2)
12404                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12405             else {
12406                 if (!(p = lock_user_string(arg2))) {
12407                     return -TARGET_EFAULT;
12408                 }
12409                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12410                 unlock_user(p, arg2, 0);
12411             }
12412         }
12413         return ret;
12414 #endif
12415 #ifdef TARGET_NR_utimensat_time64
12416     case TARGET_NR_utimensat_time64:
12417         {
12418             struct timespec *tsp, ts[2];
12419             if (!arg3) {
12420                 tsp = NULL;
12421             } else {
12422                 if (target_to_host_timespec64(ts, arg3)) {
12423                     return -TARGET_EFAULT;
12424                 }
12425                 if (target_to_host_timespec64(ts + 1, arg3 +
12426                                      sizeof(struct target__kernel_timespec))) {
12427                     return -TARGET_EFAULT;
12428                 }
12429                 tsp = ts;
12430             }
12431             if (!arg2)
12432                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12433             else {
12434                 p = lock_user_string(arg2);
12435                 if (!p) {
12436                     return -TARGET_EFAULT;
12437                 }
12438                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12439                 unlock_user(p, arg2, 0);
12440             }
12441         }
12442         return ret;
12443 #endif
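          /*
           * Both futex flavours are handled by do_futex(); the boolean second
           * argument selects the 64-bit (time64) versus native target timespec
           * layout for the optional timeout.
           */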
12444 #ifdef TARGET_NR_futex
12445     case TARGET_NR_futex:
12446         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12447 #endif
12448 #ifdef TARGET_NR_futex_time64
12449     case TARGET_NR_futex_time64:
12450         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12451 #endif
12452 #ifdef CONFIG_INOTIFY
12453 #if defined(TARGET_NR_inotify_init)
12454     case TARGET_NR_inotify_init:
12455         ret = get_errno(inotify_init());
12456         if (ret >= 0) {
12457             fd_trans_register(ret, &target_inotify_trans);
12458         }
12459         return ret;
12460 #endif
12461 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12462     case TARGET_NR_inotify_init1:
12463         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12464                                           fcntl_flags_tbl)));
12465         if (ret >= 0) {
12466             fd_trans_register(ret, &target_inotify_trans);
12467         }
12468         return ret;
12469 #endif
12470 #if defined(TARGET_NR_inotify_add_watch)
12471     case TARGET_NR_inotify_add_watch:
12472         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12473         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12474         unlock_user(p, arg2, 0);
12475         return ret;
12476 #endif
12477 #if defined(TARGET_NR_inotify_rm_watch)
12478     case TARGET_NR_inotify_rm_watch:
12479         return get_errno(inotify_rm_watch(arg1, arg2));
12480 #endif
12481 #endif
12482 
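          /*
           * POSIX message queue emulation.  A guest call such as
           *     mq_open("/queue", O_CREAT | O_RDWR, 0600, NULL);
           * (hypothetical example) has its open flags converted with
           * target_to_host_bitmask() and the optional mq_attr copied in
           * before the host mq_open() is issued.
           */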
12483 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12484     case TARGET_NR_mq_open:
12485         {
12486             struct mq_attr posix_mq_attr;
12487             struct mq_attr *pposix_mq_attr;
12488             int host_flags;
12489 
12490             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12491             pposix_mq_attr = NULL;
12492             if (arg4) {
12493                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12494                     return -TARGET_EFAULT;
12495                 }
12496                 pposix_mq_attr = &posix_mq_attr;
12497             }
12498             p = lock_user_string(arg1 - 1);
12499             if (!p) {
12500                 return -TARGET_EFAULT;
12501             }
12502             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12503             unlock_user(p, arg1, 0);
12504         }
12505         return ret;
12506 
12507     case TARGET_NR_mq_unlink:
12508         p = lock_user_string(arg1 - 1);
12509         if (!p) {
12510             return -TARGET_EFAULT;
12511         }
12512         ret = get_errno(mq_unlink(p));
12513         unlock_user(p, arg1, 0);
12514         return ret;
12515 
12516 #ifdef TARGET_NR_mq_timedsend
12517     case TARGET_NR_mq_timedsend:
12518         {
12519             struct timespec ts;
12520 
12521             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12522             if (arg5 != 0) {
12523                 if (target_to_host_timespec(&ts, arg5)) {
12524                     return -TARGET_EFAULT;
12525                 }
12526                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12527                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12528                     return -TARGET_EFAULT;
12529                 }
12530             } else {
12531                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12532             }
12533             unlock_user(p, arg2, arg3);
12534         }
12535         return ret;
12536 #endif
12537 #ifdef TARGET_NR_mq_timedsend_time64
12538     case TARGET_NR_mq_timedsend_time64:
12539         {
12540             struct timespec ts;
12541 
12542             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12543             if (arg5 != 0) {
12544                 if (target_to_host_timespec64(&ts, arg5)) {
12545                     return -TARGET_EFAULT;
12546                 }
12547                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12548                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12549                     return -TARGET_EFAULT;
12550                 }
12551             } else {
12552                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12553             }
12554             unlock_user(p, arg2, arg3);
12555         }
12556         return ret;
12557 #endif
12558 
12559 #ifdef TARGET_NR_mq_timedreceive
12560     case TARGET_NR_mq_timedreceive:
12561         {
12562             struct timespec ts;
12563             unsigned int prio;
12564 
12565             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12566             if (arg5 != 0) {
12567                 if (target_to_host_timespec(&ts, arg5)) {
12568                     return -TARGET_EFAULT;
12569                 }
12570                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12571                                                      &prio, &ts));
12572                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12573                     return -TARGET_EFAULT;
12574                 }
12575             } else {
12576                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12577                                                      &prio, NULL));
12578             }
12579             unlock_user(p, arg2, arg3);
12580             if (arg4 != 0)
12581                 put_user_u32(prio, arg4);
12582         }
12583         return ret;
12584 #endif
12585 #ifdef TARGET_NR_mq_timedreceive_time64
12586     case TARGET_NR_mq_timedreceive_time64:
12587         {
12588             struct timespec ts;
12589             unsigned int prio;
12590 
12591             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12592             if (arg5 != 0) {
12593                 if (target_to_host_timespec64(&ts, arg5)) {
12594                     return -TARGET_EFAULT;
12595                 }
12596                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12597                                                      &prio, &ts));
12598                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12599                     return -TARGET_EFAULT;
12600                 }
12601             } else {
12602                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12603                                                      &prio, NULL));
12604             }
12605             unlock_user(p, arg2, arg3);
12606             if (arg4 != 0) {
12607                 put_user_u32(prio, arg4);
12608             }
12609         }
12610         return ret;
12611 #endif
12612 
12613     /* Not implemented for now... */
12614 /*     case TARGET_NR_mq_notify: */
12615 /*         break; */
12616 
12617     case TARGET_NR_mq_getsetattr:
12618         {
12619             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12620             ret = 0;
12621             if (arg2 != 0) {
12622                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12623                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12624                                            &posix_mq_attr_out));
12625             } else if (arg3 != 0) {
12626                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12627             }
12628             if (ret == 0 && arg3 != 0) {
12629                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12630             }
12631         }
12632         return ret;
12633 #endif
12634 
12635 #ifdef CONFIG_SPLICE
12636 #ifdef TARGET_NR_tee
12637     case TARGET_NR_tee:
12638         {
12639             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12640         }
12641         return ret;
12642 #endif
12643 #ifdef TARGET_NR_splice
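          /*
           * For splice(2) the optional in/out offsets are 64-bit values in
           * guest memory: they are fetched with get_user_u64() before the
           * host call and written back afterwards so the guest sees the
           * updated file positions.
           */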
12644     case TARGET_NR_splice:
12645         {
12646             loff_t loff_in, loff_out;
12647             loff_t *ploff_in = NULL, *ploff_out = NULL;
12648             if (arg2) {
12649                 if (get_user_u64(loff_in, arg2)) {
12650                     return -TARGET_EFAULT;
12651                 }
12652                 ploff_in = &loff_in;
12653             }
12654             if (arg4) {
12655                 if (get_user_u64(loff_out, arg4)) {
12656                     return -TARGET_EFAULT;
12657                 }
12658                 ploff_out = &loff_out;
12659             }
12660             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12661             if (arg2) {
12662                 if (put_user_u64(loff_in, arg2)) {
12663                     return -TARGET_EFAULT;
12664                 }
12665             }
12666             if (arg4) {
12667                 if (put_user_u64(loff_out, arg4)) {
12668                     return -TARGET_EFAULT;
12669                 }
12670             }
12671         }
12672         return ret;
12673 #endif
12674 #ifdef TARGET_NR_vmsplice
12675     case TARGET_NR_vmsplice:
12676         {
12677             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12678             if (vec != NULL) {
12679                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12680                 unlock_iovec(vec, arg2, arg3, 0);
12681             } else {
12682                 ret = -host_to_target_errno(errno);
12683             }
12684         }
12685         return ret;
12686 #endif
12687 #endif /* CONFIG_SPLICE */
12688 #ifdef CONFIG_EVENTFD
12689 #if defined(TARGET_NR_eventfd)
12690     case TARGET_NR_eventfd:
12691         ret = get_errno(eventfd(arg1, 0));
12692         if (ret >= 0) {
12693             fd_trans_register(ret, &target_eventfd_trans);
12694         }
12695         return ret;
12696 #endif
12697 #if defined(TARGET_NR_eventfd2)
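          /*
           * The O_NONBLOCK and O_CLOEXEC bits have target-specific encodings,
           * so they are stripped and re-added in host encoding here; any other
           * flag bits (e.g. EFD_SEMAPHORE) are passed through unchanged.
           */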
12698     case TARGET_NR_eventfd2:
12699     {
12700         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12701         if (arg2 & TARGET_O_NONBLOCK) {
12702             host_flags |= O_NONBLOCK;
12703         }
12704         if (arg2 & TARGET_O_CLOEXEC) {
12705             host_flags |= O_CLOEXEC;
12706         }
12707         ret = get_errno(eventfd(arg1, host_flags));
12708         if (ret >= 0) {
12709             fd_trans_register(ret, &target_eventfd_trans);
12710         }
12711         return ret;
12712     }
12713 #endif
12714 #endif /* CONFIG_EVENTFD  */
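          /*
           * On 32-bit ABIs the 64-bit file offsets used by fallocate and
           * sync_file_range arrive split across two registers and are
           * reassembled with target_offset64(); 64-bit ABIs pass them
           * through directly.
           */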
12715 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12716     case TARGET_NR_fallocate:
12717 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12718         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12719                                   target_offset64(arg5, arg6)));
12720 #else
12721         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12722 #endif
12723         return ret;
12724 #endif
12725 #if defined(CONFIG_SYNC_FILE_RANGE)
12726 #if defined(TARGET_NR_sync_file_range)
12727     case TARGET_NR_sync_file_range:
12728 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12729 #if defined(TARGET_MIPS)
12730         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12731                                         target_offset64(arg5, arg6), arg7));
12732 #else
12733         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12734                                         target_offset64(arg4, arg5), arg6));
12735 #endif /* !TARGET_MIPS */
12736 #else
12737         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12738 #endif
12739         return ret;
12740 #endif
12741 #if defined(TARGET_NR_sync_file_range2) || \
12742     defined(TARGET_NR_arm_sync_file_range)
12743 #if defined(TARGET_NR_sync_file_range2)
12744     case TARGET_NR_sync_file_range2:
12745 #endif
12746 #if defined(TARGET_NR_arm_sync_file_range)
12747     case TARGET_NR_arm_sync_file_range:
12748 #endif
12749         /* This is like sync_file_range but the arguments are reordered */
12750 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12751         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12752                                         target_offset64(arg5, arg6), arg2));
12753 #else
12754         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12755 #endif
12756         return ret;
12757 #endif
12758 #endif
12759 #if defined(TARGET_NR_signalfd4)
12760     case TARGET_NR_signalfd4:
12761         return do_signalfd4(arg1, arg2, arg4);
12762 #endif
12763 #if defined(TARGET_NR_signalfd)
12764     case TARGET_NR_signalfd:
12765         return do_signalfd4(arg1, arg2, 0);
12766 #endif
12767 #if defined(CONFIG_EPOLL)
12768 #if defined(TARGET_NR_epoll_create)
12769     case TARGET_NR_epoll_create:
12770         return get_errno(epoll_create(arg1));
12771 #endif
12772 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12773     case TARGET_NR_epoll_create1:
12774         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12775 #endif
12776 #if defined(TARGET_NR_epoll_ctl)
12777     case TARGET_NR_epoll_ctl:
12778     {
12779         struct epoll_event ep;
12780         struct epoll_event *epp = 0;
12781         if (arg4) {
12782             if (arg2 != EPOLL_CTL_DEL) {
12783                 struct target_epoll_event *target_ep;
12784                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12785                     return -TARGET_EFAULT;
12786                 }
12787                 ep.events = tswap32(target_ep->events);
12788                 /*
12789                  * The epoll_data_t union is just opaque data to the kernel,
12790                  * so we transfer all 64 bits across and need not worry what
12791                  * actual data type it is.
12792                  */
12793                 ep.data.u64 = tswap64(target_ep->data.u64);
12794                 unlock_user_struct(target_ep, arg4, 0);
12795             }
12796             /*
12797              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12798              * non-null pointer even though the argument is otherwise
12799              * ignored, so always pass a valid pointer when arg4 is set.
12800              */
12801             epp = &ep;
12802         }
12803         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12804     }
12805 #endif
12806 
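          /*
           * epoll_wait and epoll_pwait share one implementation: events are
           * collected into a temporary host array and byte-swapped into the
           * guest's target_epoll_event buffer on success.
           */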
12807 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12808 #if defined(TARGET_NR_epoll_wait)
12809     case TARGET_NR_epoll_wait:
12810 #endif
12811 #if defined(TARGET_NR_epoll_pwait)
12812     case TARGET_NR_epoll_pwait:
12813 #endif
12814     {
12815         struct target_epoll_event *target_ep;
12816         struct epoll_event *ep;
12817         int epfd = arg1;
12818         int maxevents = arg3;
12819         int timeout = arg4;
12820 
12821         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12822             return -TARGET_EINVAL;
12823         }
12824 
12825         target_ep = lock_user(VERIFY_WRITE, arg2,
12826                               maxevents * sizeof(struct target_epoll_event), 1);
12827         if (!target_ep) {
12828             return -TARGET_EFAULT;
12829         }
12830 
12831         ep = g_try_new(struct epoll_event, maxevents);
12832         if (!ep) {
12833             unlock_user(target_ep, arg2, 0);
12834             return -TARGET_ENOMEM;
12835         }
12836 
12837         switch (num) {
12838 #if defined(TARGET_NR_epoll_pwait)
12839         case TARGET_NR_epoll_pwait:
12840         {
12841             sigset_t *set = NULL;
12842 
12843             if (arg5) {
12844                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12845                 if (ret != 0) {
12846                     break;
12847                 }
12848             }
12849 
12850             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12851                                              set, SIGSET_T_SIZE));
12852 
12853             if (set) {
12854                 finish_sigsuspend_mask(ret);
12855             }
12856             break;
12857         }
12858 #endif
12859 #if defined(TARGET_NR_epoll_wait)
12860         case TARGET_NR_epoll_wait:
12861             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12862                                              NULL, 0));
12863             break;
12864 #endif
12865         default:
12866             ret = -TARGET_ENOSYS;
12867         }
12868         if (!is_error(ret)) {
12869             int i;
12870             for (i = 0; i < ret; i++) {
12871                 target_ep[i].events = tswap32(ep[i].events);
12872                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12873             }
12874             unlock_user(target_ep, arg2,
12875                         ret * sizeof(struct target_epoll_event));
12876         } else {
12877             unlock_user(target_ep, arg2, 0);
12878         }
12879         g_free(ep);
12880         return ret;
12881     }
12882 #endif
12883 #endif
12884 #ifdef TARGET_NR_prlimit64
12885     case TARGET_NR_prlimit64:
12886     {
12887         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12888         struct target_rlimit64 *target_rnew, *target_rold;
12889         struct host_rlimit64 rnew, rold, *rnewp = 0;
12890         int resource = target_to_host_resource(arg2);
12891 
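              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host, presumably because they would constrain
               * the QEMU process itself rather than just the guest; only the
               * old values are read back for those resources.
               */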
12892         if (arg3 && (resource != RLIMIT_AS &&
12893                      resource != RLIMIT_DATA &&
12894                      resource != RLIMIT_STACK)) {
12895             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12896                 return -TARGET_EFAULT;
12897             }
12898             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
12899             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
12900             unlock_user_struct(target_rnew, arg3, 0);
12901             rnewp = &rnew;
12902         }
12903 
12904         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12905         if (!is_error(ret) && arg4) {
12906             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12907                 return -TARGET_EFAULT;
12908             }
12909             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
12910             __put_user(rold.rlim_max, &target_rold->rlim_max);
12911             unlock_user_struct(target_rold, arg4, 1);
12912         }
12913         return ret;
12914     }
12915 #endif
12916 #ifdef TARGET_NR_gethostname
12917     case TARGET_NR_gethostname:
12918     {
12919         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12920         if (name) {
12921             ret = get_errno(gethostname(name, arg2));
12922             unlock_user(name, arg1, arg2);
12923         } else {
12924             ret = -TARGET_EFAULT;
12925         }
12926         return ret;
12927     }
12928 #endif
12929 #ifdef TARGET_NR_atomic_cmpxchg_32
12930     case TARGET_NR_atomic_cmpxchg_32:
12931     {
12932         /* should use start_exclusive from main.c */
12933         abi_ulong mem_value;
12934         if (get_user_u32(mem_value, arg6)) {
12935             target_siginfo_t info;
12936             info.si_signo = SIGSEGV;
12937             info.si_errno = 0;
12938             info.si_code = TARGET_SEGV_MAPERR;
12939             info._sifields._sigfault._addr = arg6;
12940             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12941             ret = 0xdeadbeef;
12942 
12943         }
12944         if (mem_value == arg2)
12945             put_user_u32(arg1, arg6);
12946         return mem_value;
12947     }
12948 #endif
12949 #ifdef TARGET_NR_atomic_barrier
12950     case TARGET_NR_atomic_barrier:
12951         /* Like the kernel implementation and the
12952            qemu arm barrier, treat this as a no-op. */
12953         return 0;
12954 #endif
12955 
12956 #ifdef TARGET_NR_timer_create
12957     case TARGET_NR_timer_create:
12958     {
12959         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12960 
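              /*
               * Host timers live in the g_posix_timers[] slot array; the
               * timer id handed back to the guest is the slot index tagged
               * with TIMER_MAGIC so that later timer_* calls can validate it
               * (see get_timer_id()).
               */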
12961         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12962 
12963         int clkid = arg1;
12964         int timer_index = next_free_host_timer();
12965 
12966         if (timer_index < 0) {
12967             ret = -TARGET_EAGAIN;
12968         } else {
12969             timer_t *phtimer = g_posix_timers + timer_index;
12970 
12971             if (arg2) {
12972                 phost_sevp = &host_sevp;
12973                 ret = target_to_host_sigevent(phost_sevp, arg2);
12974                 if (ret != 0) {
12975                     free_host_timer_slot(timer_index);
12976                     return ret;
12977                 }
12978             }
12979 
12980             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12981             if (ret) {
12982                 free_host_timer_slot(timer_index);
12983             } else {
12984                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12985                     timer_delete(*phtimer);
12986                     free_host_timer_slot(timer_index);
12987                     return -TARGET_EFAULT;
12988                 }
12989             }
12990         }
12991         return ret;
12992     }
12993 #endif
12994 
12995 #ifdef TARGET_NR_timer_settime
12996     case TARGET_NR_timer_settime:
12997     {
12998         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12999          * struct itimerspec * old_value */
13000         target_timer_t timerid = get_timer_id(arg1);
13001 
13002         if (timerid < 0) {
13003             ret = timerid;
13004         } else if (arg3 == 0) {
13005             ret = -TARGET_EINVAL;
13006         } else {
13007             timer_t htimer = g_posix_timers[timerid];
13008             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13009 
13010             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13011                 return -TARGET_EFAULT;
13012             }
13013             ret = get_errno(
13014                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13015             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13016                 return -TARGET_EFAULT;
13017             }
13018         }
13019         return ret;
13020     }
13021 #endif
13022 
13023 #ifdef TARGET_NR_timer_settime64
13024     case TARGET_NR_timer_settime64:
13025     {
13026         target_timer_t timerid = get_timer_id(arg1);
13027 
13028         if (timerid < 0) {
13029             ret = timerid;
13030         } else if (arg3 == 0) {
13031             ret = -TARGET_EINVAL;
13032         } else {
13033             timer_t htimer = g_posix_timers[timerid];
13034             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13035 
13036             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13037                 return -TARGET_EFAULT;
13038             }
13039             ret = get_errno(
13040                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13041             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13042                 return -TARGET_EFAULT;
13043             }
13044         }
13045         return ret;
13046     }
13047 #endif
13048 
13049 #ifdef TARGET_NR_timer_gettime
13050     case TARGET_NR_timer_gettime:
13051     {
13052         /* args: timer_t timerid, struct itimerspec *curr_value */
13053         target_timer_t timerid = get_timer_id(arg1);
13054 
13055         if (timerid < 0) {
13056             ret = timerid;
13057         } else if (!arg2) {
13058             ret = -TARGET_EFAULT;
13059         } else {
13060             timer_t htimer = g_posix_timers[timerid];
13061             struct itimerspec hspec;
13062             ret = get_errno(timer_gettime(htimer, &hspec));
13063 
13064             if (host_to_target_itimerspec(arg2, &hspec)) {
13065                 ret = -TARGET_EFAULT;
13066             }
13067         }
13068         return ret;
13069     }
13070 #endif
13071 
13072 #ifdef TARGET_NR_timer_gettime64
13073     case TARGET_NR_timer_gettime64:
13074     {
13075         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13076         target_timer_t timerid = get_timer_id(arg1);
13077 
13078         if (timerid < 0) {
13079             ret = timerid;
13080         } else if (!arg2) {
13081             ret = -TARGET_EFAULT;
13082         } else {
13083             timer_t htimer = g_posix_timers[timerid];
13084             struct itimerspec hspec;
13085             ret = get_errno(timer_gettime(htimer, &hspec));
13086 
13087             if (host_to_target_itimerspec64(arg2, &hspec)) {
13088                 ret = -TARGET_EFAULT;
13089             }
13090         }
13091         return ret;
13092     }
13093 #endif
13094 
13095 #ifdef TARGET_NR_timer_getoverrun
13096     case TARGET_NR_timer_getoverrun:
13097     {
13098         /* args: timer_t timerid */
13099         target_timer_t timerid = get_timer_id(arg1);
13100 
13101         if (timerid < 0) {
13102             ret = timerid;
13103         } else {
13104             timer_t htimer = g_posix_timers[timerid];
13105             ret = get_errno(timer_getoverrun(htimer));
13106         }
13107         return ret;
13108     }
13109 #endif
13110 
13111 #ifdef TARGET_NR_timer_delete
13112     case TARGET_NR_timer_delete:
13113     {
13114         /* args: timer_t timerid */
13115         target_timer_t timerid = get_timer_id(arg1);
13116 
13117         if (timerid < 0) {
13118             ret = timerid;
13119         } else {
13120             timer_t htimer = g_posix_timers[timerid];
13121             ret = get_errno(timer_delete(htimer));
13122             free_host_timer_slot(timerid);
13123         }
13124         return ret;
13125     }
13126 #endif
13127 
13128 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13129     case TARGET_NR_timerfd_create:
13130         ret = get_errno(timerfd_create(arg1,
13131                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13132         if (ret >= 0) {
13133             fd_trans_register(ret, &target_timerfd_trans);
13134         }
13135         return ret;
13136 #endif
13137 
13138 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13139     case TARGET_NR_timerfd_gettime:
13140         {
13141             struct itimerspec its_curr;
13142 
13143             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13144 
13145             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13146                 return -TARGET_EFAULT;
13147             }
13148         }
13149         return ret;
13150 #endif
13151 
13152 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13153     case TARGET_NR_timerfd_gettime64:
13154         {
13155             struct itimerspec its_curr;
13156 
13157             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13158 
13159             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13160                 return -TARGET_EFAULT;
13161             }
13162         }
13163         return ret;
13164 #endif
13165 
13166 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13167     case TARGET_NR_timerfd_settime:
13168         {
13169             struct itimerspec its_new, its_old, *p_new;
13170 
13171             if (arg3) {
13172                 if (target_to_host_itimerspec(&its_new, arg3)) {
13173                     return -TARGET_EFAULT;
13174                 }
13175                 p_new = &its_new;
13176             } else {
13177                 p_new = NULL;
13178             }
13179 
13180             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13181 
13182             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13183                 return -TARGET_EFAULT;
13184             }
13185         }
13186         return ret;
13187 #endif
13188 
13189 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13190     case TARGET_NR_timerfd_settime64:
13191         {
13192             struct itimerspec its_new, its_old, *p_new;
13193 
13194             if (arg3) {
13195                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13196                     return -TARGET_EFAULT;
13197                 }
13198                 p_new = &its_new;
13199             } else {
13200                 p_new = NULL;
13201             }
13202 
13203             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13204 
13205             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13206                 return -TARGET_EFAULT;
13207             }
13208         }
13209         return ret;
13210 #endif
13211 
13212 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13213     case TARGET_NR_ioprio_get:
13214         return get_errno(ioprio_get(arg1, arg2));
13215 #endif
13216 
13217 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13218     case TARGET_NR_ioprio_set:
13219         return get_errno(ioprio_set(arg1, arg2, arg3));
13220 #endif
13221 
13222 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13223     case TARGET_NR_setns:
13224         return get_errno(setns(arg1, arg2));
13225 #endif
13226 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13227     case TARGET_NR_unshare:
13228         return get_errno(unshare(arg1));
13229 #endif
13230 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13231     case TARGET_NR_kcmp:
13232         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13233 #endif
13234 #ifdef TARGET_NR_swapcontext
13235     case TARGET_NR_swapcontext:
13236         /* PowerPC specific.  */
13237         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13238 #endif
13239 #ifdef TARGET_NR_memfd_create
13240     case TARGET_NR_memfd_create:
13241         p = lock_user_string(arg1);
13242         if (!p) {
13243             return -TARGET_EFAULT;
13244         }
13245         ret = get_errno(memfd_create(p, arg2));
13246         fd_trans_unregister(ret);
13247         unlock_user(p, arg1, 0);
13248         return ret;
13249 #endif
13250 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13251     case TARGET_NR_membarrier:
13252         return get_errno(membarrier(arg1, arg2));
13253 #endif
13254 
13255 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13256     case TARGET_NR_copy_file_range:
13257         {
13258             loff_t inoff, outoff;
13259             loff_t *pinoff = NULL, *poutoff = NULL;
13260 
13261             if (arg2) {
13262                 if (get_user_u64(inoff, arg2)) {
13263                     return -TARGET_EFAULT;
13264                 }
13265                 pinoff = &inoff;
13266             }
13267             if (arg4) {
13268                 if (get_user_u64(outoff, arg4)) {
13269                     return -TARGET_EFAULT;
13270                 }
13271                 poutoff = &outoff;
13272             }
13273             /* Do not sign-extend the count parameter. */
13274             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13275                                                  (abi_ulong)arg5, arg6));
13276             if (!is_error(ret) && ret > 0) {
13277                 if (arg2) {
13278                     if (put_user_u64(inoff, arg2)) {
13279                         return -TARGET_EFAULT;
13280                     }
13281                 }
13282                 if (arg4) {
13283                     if (put_user_u64(outoff, arg4)) {
13284                         return -TARGET_EFAULT;
13285                     }
13286                 }
13287             }
13288         }
13289         return ret;
13290 #endif
13291 
13292 #if defined(TARGET_NR_pivot_root)
13293     case TARGET_NR_pivot_root:
13294         {
13295             void *p2;
13296             p = lock_user_string(arg1); /* new_root */
13297             p2 = lock_user_string(arg2); /* put_old */
13298             if (!p || !p2) {
13299                 ret = -TARGET_EFAULT;
13300             } else {
13301                 ret = get_errno(pivot_root(p, p2));
13302             }
13303             unlock_user(p2, arg2, 0);
13304             unlock_user(p, arg1, 0);
13305         }
13306         return ret;
13307 #endif
13308 
13309     default:
13310         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13311         return -TARGET_ENOSYS;
13312     }
13313     return ret;
13314 }
13315 
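      /*
       * Entry point used by the per-target cpu loops: wraps do_syscall1()
       * with -strace logging (print_syscall/print_syscall_ret), the
       * record_syscall_start/return hooks and the DEBUG_ERESTARTSYS
       * restart-testing hack.
       */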
13316 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13317                     abi_long arg2, abi_long arg3, abi_long arg4,
13318                     abi_long arg5, abi_long arg6, abi_long arg7,
13319                     abi_long arg8)
13320 {
13321     CPUState *cpu = env_cpu(cpu_env);
13322     abi_long ret;
13323 
13324 #ifdef DEBUG_ERESTARTSYS
13325     /* Debug-only code for exercising the syscall-restart code paths
13326      * in the per-architecture cpu main loops: restart every syscall
13327      * the guest makes once before letting it through.
13328      */
13329     {
13330         static bool flag;
13331         flag = !flag;
13332         if (flag) {
13333             return -QEMU_ERESTARTSYS;
13334         }
13335     }
13336 #endif
13337 
13338     record_syscall_start(cpu, num, arg1,
13339                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13340 
13341     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13342         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13343     }
13344 
13345     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13346                       arg5, arg6, arg7, arg8);
13347 
13348     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13349         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13350                           arg3, arg4, arg5, arg6);
13351     }
13352 
13353     record_syscall_return(cpu, num, ret);
13354     return ret;
13355 }
13356