xref: /openbmc/qemu/linux-user/syscall.c (revision e8d1e0cd)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "target_mman.h"
26 #include <elf.h>
27 #include <endian.h>
28 #include <grp.h>
29 #include <sys/ipc.h>
30 #include <sys/msg.h>
31 #include <sys/wait.h>
32 #include <sys/mount.h>
33 #include <sys/file.h>
34 #include <sys/fsuid.h>
35 #include <sys/personality.h>
36 #include <sys/prctl.h>
37 #include <sys/resource.h>
38 #include <sys/swap.h>
39 #include <linux/capability.h>
40 #include <sched.h>
41 #include <sys/timex.h>
42 #include <sys/socket.h>
43 #include <linux/sockios.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <utime.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/in.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <netinet/udp.h>
59 #include <linux/wireless.h>
60 #include <linux/icmp.h>
61 #include <linux/icmpv6.h>
62 #include <linux/if_tun.h>
63 #include <linux/in6.h>
64 #include <linux/errqueue.h>
65 #include <linux/random.h>
66 #ifdef CONFIG_TIMERFD
67 #include <sys/timerfd.h>
68 #endif
69 #ifdef CONFIG_EVENTFD
70 #include <sys/eventfd.h>
71 #endif
72 #ifdef CONFIG_EPOLL
73 #include <sys/epoll.h>
74 #endif
75 #ifdef CONFIG_ATTR
76 #include "qemu/xattr.h"
77 #endif
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
80 #endif
81 #ifdef HAVE_SYS_KCOV_H
82 #include <sys/kcov.h>
83 #endif
84 
85 #define termios host_termios
86 #define winsize host_winsize
87 #define termio host_termio
88 #define sgttyb host_sgttyb /* same as target */
89 #define tchars host_tchars /* same as target */
90 #define ltchars host_ltchars /* same as target */
91 
92 #include <linux/termios.h>
93 #include <linux/unistd.h>
94 #include <linux/cdrom.h>
95 #include <linux/hdreg.h>
96 #include <linux/soundcard.h>
97 #include <linux/kd.h>
98 #include <linux/mtio.h>
99 #include <linux/fs.h>
100 #include <linux/fd.h>
101 #if defined(CONFIG_FIEMAP)
102 #include <linux/fiemap.h>
103 #endif
104 #include <linux/fb.h>
105 #if defined(CONFIG_USBFS)
106 #include <linux/usbdevice_fs.h>
107 #include <linux/usb/ch9.h>
108 #endif
109 #include <linux/vt.h>
110 #include <linux/dm-ioctl.h>
111 #include <linux/reboot.h>
112 #include <linux/route.h>
113 #include <linux/filter.h>
114 #include <linux/blkpg.h>
115 #include <netpacket/packet.h>
116 #include <linux/netlink.h>
117 #include <linux/if_alg.h>
118 #include <linux/rtc.h>
119 #include <sound/asound.h>
120 #ifdef HAVE_BTRFS_H
121 #include <linux/btrfs.h>
122 #endif
123 #ifdef HAVE_DRM_H
124 #include <libdrm/drm.h>
125 #include <libdrm/i915_drm.h>
126 #endif
127 #include "linux_loop.h"
128 #include "uname.h"
129 
130 #include "qemu.h"
131 #include "user-internals.h"
132 #include "strace.h"
133 #include "signal-common.h"
134 #include "loader.h"
135 #include "user-mmap.h"
136 #include "user/safe-syscall.h"
137 #include "qemu/guest-random.h"
138 #include "qemu/selfmap.h"
139 #include "user/syscall-trace.h"
140 #include "special-errno.h"
141 #include "qapi/error.h"
142 #include "fd-trans.h"
143 #include "tcg/tcg.h"
144 #include "cpu_loop-common.h"
145 
146 #ifndef CLONE_IO
147 #define CLONE_IO                0x80000000      /* Clone io context */
148 #endif
149 
150 /* We can't directly call the host clone syscall, because this will
151  * badly confuse libc (breaking mutexes, for example). So we must
152  * divide clone flags into:
153  *  * flag combinations that look like pthread_create()
154  *  * flag combinations that look like fork()
155  *  * flags we can implement within QEMU itself
156  *  * flags we can't support and will return an error for
157  */
158 /* For thread creation, all these flags must be present; for
159  * fork, none must be present.
160  */
161 #define CLONE_THREAD_FLAGS                              \
162     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
163      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
164 
165 /* These flags are ignored:
166  * CLONE_DETACHED is now ignored by the kernel;
167  * CLONE_IO is just an optimisation hint to the I/O scheduler
168  */
169 #define CLONE_IGNORED_FLAGS                     \
170     (CLONE_DETACHED | CLONE_IO)
171 
172 #ifndef CLONE_PIDFD
173 # define CLONE_PIDFD 0x00001000
174 #endif
175 
176 /* Flags for fork which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_FORK_FLAGS               \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
180 
181 /* Flags for thread creation which we can implement within QEMU itself */
182 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
183     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
184      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
185 
186 #define CLONE_INVALID_FORK_FLAGS                                        \
187     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
188 
189 #define CLONE_INVALID_THREAD_FLAGS                                      \
190     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
191        CLONE_IGNORED_FLAGS))
192 
193 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
194  * have almost all been allocated. We cannot support any of
195  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
196  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
197  * The checks against the invalid thread masks above will catch these.
198  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
199  */
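
/*
 * A minimal sketch (a hypothetical helper, not one of the real do_fork()
 * checks) of how the masks above classify a clone flag word into the
 * "thread-like", "fork-like" and unsupported cases described in the
 * comments.
 */
static int classify_clone_flags(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* e.g. glibc pthread_create(): CLONE_VM | CLONE_FS | CLONE_FILES |
         * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM together with
         * CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;  /* thread */
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* e.g. fork(): only SIGCHLD in the CSIGNAL bits */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;    /* fork */
    }
    return -1;  /* partial thread flag sets are not supported */
}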
200 
201 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
202  * once. This exercises the codepaths for restart.
203  */
204 //#define DEBUG_ERESTARTSYS
205 
206 //#include <linux/msdos_fs.h>
207 #define VFAT_IOCTL_READDIR_BOTH \
208     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
209 #define VFAT_IOCTL_READDIR_SHORT \
210     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
211 
212 #undef _syscall0
213 #undef _syscall1
214 #undef _syscall2
215 #undef _syscall3
216 #undef _syscall4
217 #undef _syscall5
218 #undef _syscall6
219 
220 #define _syscall0(type,name)		\
221 static type name (void)			\
222 {					\
223 	return syscall(__NR_##name);	\
224 }
225 
226 #define _syscall1(type,name,type1,arg1)		\
227 static type name (type1 arg1)			\
228 {						\
229 	return syscall(__NR_##name, arg1);	\
230 }
231 
232 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
233 static type name (type1 arg1,type2 arg2)		\
234 {							\
235 	return syscall(__NR_##name, arg1, arg2);	\
236 }
237 
238 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3)		\
240 {								\
241 	return syscall(__NR_##name, arg1, arg2, arg3);		\
242 }
243 
244 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
246 {										\
247 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
248 }
249 
250 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
251 		  type5,arg5)							\
252 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
253 {										\
254 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
255 }
256 
257 
258 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
259 		  type5,arg5,type6,arg6)					\
260 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
261                   type6 arg6)							\
262 {										\
263 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
264 }
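
/*
 * For illustration, the declaration further down
 *     _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
 * expands to a thin host wrapper:
 *
 *     static int sys_syslog(int type, char *bufp, int len)
 *     {
 *         return syscall(__NR_sys_syslog, type, bufp, len);
 *     }
 *
 * with __NR_sys_syslog aliased to the host's __NR_syslog just below.
 */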
265 
266 
267 #define __NR_sys_uname __NR_uname
268 #define __NR_sys_getcwd1 __NR_getcwd
269 #define __NR_sys_getdents __NR_getdents
270 #define __NR_sys_getdents64 __NR_getdents64
271 #define __NR_sys_getpriority __NR_getpriority
272 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
273 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
274 #define __NR_sys_syslog __NR_syslog
275 #if defined(__NR_futex)
276 # define __NR_sys_futex __NR_futex
277 #endif
278 #if defined(__NR_futex_time64)
279 # define __NR_sys_futex_time64 __NR_futex_time64
280 #endif
281 #define __NR_sys_statx __NR_statx
282 
283 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
284 #define __NR__llseek __NR_lseek
285 #endif
286 
287 /* Newer kernel ports have llseek() instead of _llseek() */
288 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
289 #define TARGET_NR__llseek TARGET_NR_llseek
290 #endif
291 
292 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
293 #ifndef TARGET_O_NONBLOCK_MASK
294 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
295 #endif
296 
297 #define __NR_sys_gettid __NR_gettid
298 _syscall0(int, sys_gettid)
299 
300 /* For the 64-bit guest on 32-bit host case we must emulate
301  * getdents using getdents64, because otherwise the host
302  * might hand us back more dirent records than we can fit
303  * into the guest buffer after structure format conversion.
304  * Otherwise we emulate getdents with getdents if the host has it.
305  */
306 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
307 #define EMULATE_GETDENTS_WITH_GETDENTS
308 #endif
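
/*
 * Rough numbers behind the comment above (indicative only, assuming the
 * usual layouts): on a 32-bit host a struct linux_dirent record carries a
 * ~10 byte header (32-bit d_ino and d_off plus 16-bit d_reclen), while a
 * 64-bit guest expects a ~18 byte header with 64-bit d_ino/d_off, so host
 * getdents() output can grow past the guest buffer once converted.  Host
 * getdents64() records are never smaller than the converted guest records,
 * which is why emulating via getdents64 is safe in that case.
 */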
309 
310 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
311 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
312 #endif
313 #if (defined(TARGET_NR_getdents) && \
314       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
315     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
316 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
317 #endif
318 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
319 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
320           loff_t *, res, uint, wh);
321 #endif
322 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
323 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
324           siginfo_t *, uinfo)
325 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
326 #ifdef __NR_exit_group
327 _syscall1(int,exit_group,int,error_code)
328 #endif
329 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
330 #define __NR_sys_close_range __NR_close_range
331 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
332 #ifndef CLOSE_RANGE_CLOEXEC
333 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
334 #endif
335 #endif
336 #if defined(__NR_futex)
337 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
338           const struct timespec *,timeout,int *,uaddr2,int,val3)
339 #endif
340 #if defined(__NR_futex_time64)
341 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
342           const struct timespec *,timeout,int *,uaddr2,int,val3)
343 #endif
344 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
345 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
346 #endif
347 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
348 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
349                              unsigned int, flags);
350 #endif
351 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
352 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
353 #endif
354 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
355 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
356           unsigned long *, user_mask_ptr);
357 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
358 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
359           unsigned long *, user_mask_ptr);
360 /* sched_attr is not defined in glibc */
361 struct sched_attr {
362     uint32_t size;
363     uint32_t sched_policy;
364     uint64_t sched_flags;
365     int32_t sched_nice;
366     uint32_t sched_priority;
367     uint64_t sched_runtime;
368     uint64_t sched_deadline;
369     uint64_t sched_period;
370     uint32_t sched_util_min;
371     uint32_t sched_util_max;
372 };
373 #define __NR_sys_sched_getattr __NR_sched_getattr
374 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
375           unsigned int, size, unsigned int, flags);
376 #define __NR_sys_sched_setattr __NR_sched_setattr
377 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
378           unsigned int, flags);
379 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
380 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
381 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
382 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
383           const struct sched_param *, param);
384 #define __NR_sys_sched_getparam __NR_sched_getparam
385 _syscall2(int, sys_sched_getparam, pid_t, pid,
386           struct sched_param *, param);
387 #define __NR_sys_sched_setparam __NR_sched_setparam
388 _syscall2(int, sys_sched_setparam, pid_t, pid,
389           const struct sched_param *, param);
390 #define __NR_sys_getcpu __NR_getcpu
391 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
392 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
393           void *, arg);
394 _syscall2(int, capget, struct __user_cap_header_struct *, header,
395           struct __user_cap_data_struct *, data);
396 _syscall2(int, capset, struct __user_cap_header_struct *, header,
397           struct __user_cap_data_struct *, data);
398 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
399 _syscall2(int, ioprio_get, int, which, int, who)
400 #endif
401 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
402 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
403 #endif
404 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
405 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
406 #endif
407 
408 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
409 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
410           unsigned long, idx1, unsigned long, idx2)
411 #endif
412 
413 /*
414  * It is assumed that struct statx is architecture independent.
415  */
416 #if defined(TARGET_NR_statx) && defined(__NR_statx)
417 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
418           unsigned int, mask, struct target_statx *, statxbuf)
419 #endif
420 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
421 _syscall2(int, membarrier, int, cmd, int, flags)
422 #endif
423 
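/*
 * Translation table for open()/fcntl() flag bits.  Each entry is
 * { target mask, target bits, host mask, host bits }: when the target
 * flags masked with the target mask equal the target bits, the host bits
 * are set in the host flags (and symmetrically for the host-to-target
 * direction).  The O_ACCMODE rows show why a separate mask is needed:
 * the access mode is a multi-bit field rather than a single flag.
 */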
424 static const bitmask_transtbl fcntl_flags_tbl[] = {
425   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
426   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
427   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
428   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
429   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
430   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
431   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
432   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
433   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
434   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
435   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
436   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
437   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
438 #if defined(O_DIRECT)
439   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
440 #endif
441 #if defined(O_NOATIME)
442   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
443 #endif
444 #if defined(O_CLOEXEC)
445   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
446 #endif
447 #if defined(O_PATH)
448   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
449 #endif
450 #if defined(O_TMPFILE)
451   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
452 #endif
453   /* Don't terminate the list prematurely on 64-bit host+guest.  */
454 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
455   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
456 #endif
457   { 0, 0, 0, 0 }
458 };
459 
460 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
461 
462 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
463 #if defined(__NR_utimensat)
464 #define __NR_sys_utimensat __NR_utimensat
465 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
466           const struct timespec *,tsp,int,flags)
467 #else
468 static int sys_utimensat(int dirfd, const char *pathname,
469                          const struct timespec times[2], int flags)
470 {
471     errno = ENOSYS;
472     return -1;
473 }
474 #endif
475 #endif /* TARGET_NR_utimensat */
476 
477 #ifdef TARGET_NR_renameat2
478 #if defined(__NR_renameat2)
479 #define __NR_sys_renameat2 __NR_renameat2
480 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
481           const char *, new, unsigned int, flags)
482 #else
483 static int sys_renameat2(int oldfd, const char *old,
484                          int newfd, const char *new, int flags)
485 {
486     if (flags == 0) {
487         return renameat(oldfd, old, newfd, new);
488     }
489     errno = ENOSYS;
490     return -1;
491 }
492 #endif
493 #endif /* TARGET_NR_renameat2 */
494 
495 #ifdef CONFIG_INOTIFY
496 #include <sys/inotify.h>
497 #else
498 /* Userspace can usually survive at runtime without inotify */
499 #undef TARGET_NR_inotify_init
500 #undef TARGET_NR_inotify_init1
501 #undef TARGET_NR_inotify_add_watch
502 #undef TARGET_NR_inotify_rm_watch
503 #endif /* CONFIG_INOTIFY  */
504 
505 #if defined(TARGET_NR_prlimit64)
506 #ifndef __NR_prlimit64
507 # define __NR_prlimit64 -1
508 #endif
509 #define __NR_sys_prlimit64 __NR_prlimit64
510 /* The glibc rlimit structure may not be the one used by the underlying syscall */
511 struct host_rlimit64 {
512     uint64_t rlim_cur;
513     uint64_t rlim_max;
514 };
515 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
516           const struct host_rlimit64 *, new_limit,
517           struct host_rlimit64 *, old_limit)
518 #endif
519 
520 
521 #if defined(TARGET_NR_timer_create)
522 /* Maximum of 32 active POSIX timers allowed at any one time. */
523 #define GUEST_TIMER_MAX 32
524 static timer_t g_posix_timers[GUEST_TIMER_MAX];
525 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
526 
527 static inline int next_free_host_timer(void)
528 {
529     int k;
530     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
531         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
532             return k;
533         }
534     }
535     return -1;
536 }
537 
538 static inline void free_host_timer_slot(int id)
539 {
540     qatomic_store_release(g_posix_timer_allocated + id, 0);
541 }
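
/*
 * Minimal usage sketch for the slot allocator above (a hypothetical helper,
 * not the actual TARGET_NR_timer_create handler): grab a free slot, create
 * the host timer into it, and release the slot again on failure.  The slot
 * index can then be mapped to whatever timer id the guest ABI expects.
 */
static abi_long example_create_guest_timer(clockid_t clock)
{
    int slot = next_free_host_timer();
    if (slot < 0) {
        return -TARGET_EAGAIN;          /* all GUEST_TIMER_MAX slots busy */
    }
    if (timer_create(clock, NULL, &g_posix_timers[slot]) < 0) {
        free_host_timer_slot(slot);     /* give the slot back */
        return get_errno(-1);           /* convert host errno for the guest */
    }
    return slot;
}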
542 #endif
543 
544 static inline int host_to_target_errno(int host_errno)
545 {
546     switch (host_errno) {
547 #define E(X)  case X: return TARGET_##X;
548 #include "errnos.c.inc"
549 #undef E
550     default:
551         return host_errno;
552     }
553 }
554 
555 static inline int target_to_host_errno(int target_errno)
556 {
557     switch (target_errno) {
558 #define E(X)  case TARGET_##X: return X;
559 #include "errnos.c.inc"
560 #undef E
561     default:
562         return target_errno;
563     }
564 }
565 
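/*
 * Host libc wrappers report failure as -1 with errno set; the guest instead
 * expects the negated target errno as the syscall return value itself.
 * get_errno() converts a host return value into that convention.
 */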
566 abi_long get_errno(abi_long ret)
567 {
568     if (ret == -1)
569         return -host_to_target_errno(errno);
570     else
571         return ret;
572 }
573 
574 const char *target_strerror(int err)
575 {
576     if (err == QEMU_ERESTARTSYS) {
577         return "To be restarted";
578     }
579     if (err == QEMU_ESIGRETURN) {
580         return "Successful exit from sigreturn";
581     }
582 
583     return strerror(target_to_host_errno(err));
584 }
585 
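/*
 * Check that guest memory at addr is zero from offset ksize up to usize,
 * for syscalls where the guest may pass a struct larger than the one QEMU
 * knows about.  Returns 1 if the tail is all zeroes (or usize <= ksize),
 * 0 if a non-zero byte is found, and -TARGET_EFAULT if the guest memory
 * cannot be read.
 */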
586 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
587 {
588     int i;
589     uint8_t b;
590     if (usize <= ksize) {
591         return 1;
592     }
593     for (i = ksize; i < usize; i++) {
594         if (get_user_u8(b, addr + i)) {
595             return -TARGET_EFAULT;
596         }
597         if (b != 0) {
598             return 0;
599         }
600     }
601     return 1;
602 }
603 
604 #define safe_syscall0(type, name) \
605 static type safe_##name(void) \
606 { \
607     return safe_syscall(__NR_##name); \
608 }
609 
610 #define safe_syscall1(type, name, type1, arg1) \
611 static type safe_##name(type1 arg1) \
612 { \
613     return safe_syscall(__NR_##name, arg1); \
614 }
615 
616 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
617 static type safe_##name(type1 arg1, type2 arg2) \
618 { \
619     return safe_syscall(__NR_##name, arg1, arg2); \
620 }
621 
622 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
623 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
624 { \
625     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
626 }
627 
628 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
629     type4, arg4) \
630 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
631 { \
632     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
633 }
634 
635 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
636     type4, arg4, type5, arg5) \
637 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
638     type5 arg5) \
639 { \
640     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
641 }
642 
643 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
644     type4, arg4, type5, arg5, type6, arg6) \
645 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
646     type5 arg5, type6 arg6) \
647 { \
648     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
649 }
650 
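/*
 * As with the _syscall*() macros above, these expand to thin wrappers; the
 * difference is that they go through safe_syscall(), which works with the
 * signal handling code so that a blocking host call can be interrupted and
 * restarted when a guest signal arrives rather than the signal being
 * delayed indefinitely.  For illustration, the first declaration below
 * expands to:
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 */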
651 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
652 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
653 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
654               int, flags, mode_t, mode)
655 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
656 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
657               struct rusage *, rusage)
658 #endif
659 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
660               int, options, struct rusage *, rusage)
661 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
662               char **, argv, char **, envp, int, flags)
663 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
664     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
665 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
666               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
667 #endif
668 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
669 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
670               struct timespec *, tsp, const sigset_t *, sigmask,
671               size_t, sigsetsize)
672 #endif
673 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
674               int, maxevents, int, timeout, const sigset_t *, sigmask,
675               size_t, sigsetsize)
676 #if defined(__NR_futex)
677 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
678               const struct timespec *,timeout,int *,uaddr2,int,val3)
679 #endif
680 #if defined(__NR_futex_time64)
681 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
682               const struct timespec *,timeout,int *,uaddr2,int,val3)
683 #endif
684 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
685 safe_syscall2(int, kill, pid_t, pid, int, sig)
686 safe_syscall2(int, tkill, int, tid, int, sig)
687 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
688 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
689 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
690 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
691               unsigned long, pos_l, unsigned long, pos_h)
692 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
695               socklen_t, addrlen)
696 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
697               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
698 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
699               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
700 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
701 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
702 safe_syscall2(int, flock, int, fd, int, operation)
703 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
704 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
705               const struct timespec *, uts, size_t, sigsetsize)
706 #endif
707 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
708               int, flags)
709 #if defined(TARGET_NR_nanosleep)
710 safe_syscall2(int, nanosleep, const struct timespec *, req,
711               struct timespec *, rem)
712 #endif
713 #if defined(TARGET_NR_clock_nanosleep) || \
714     defined(TARGET_NR_clock_nanosleep_time64)
715 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
716               const struct timespec *, req, struct timespec *, rem)
717 #endif
718 #ifdef __NR_ipc
719 #ifdef __s390x__
720 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
721               void *, ptr)
722 #else
723 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
724               void *, ptr, long, fifth)
725 #endif
726 #endif
727 #ifdef __NR_msgsnd
728 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
729               int, flags)
730 #endif
731 #ifdef __NR_msgrcv
732 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
733               long, msgtype, int, flags)
734 #endif
735 #ifdef __NR_semtimedop
736 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
737               unsigned, nsops, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedsend) || \
740     defined(TARGET_NR_mq_timedsend_time64)
741 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
742               size_t, len, unsigned, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_mq_timedreceive) || \
745     defined(TARGET_NR_mq_timedreceive_time64)
746 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
747               size_t, len, unsigned *, prio, const struct timespec *, timeout)
748 #endif
749 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
750 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
751               int, outfd, loff_t *, poutoff, size_t, length,
752               unsigned int, flags)
753 #endif
754 
755 /* We do ioctl like this rather than via safe_syscall3 to preserve the
756  * "third argument might be integer or pointer or not present" behaviour of
757  * the libc function.
758  */
759 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
760 /* Similarly for fcntl. Note that callers must always:
761  *  * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
762  *  * use the flock64 struct rather than unsuffixed flock
763  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
764  */
765 #ifdef __NR_fcntl64
766 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
767 #else
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
769 #endif
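
/*
 * Illustrative sketch of the rule above, assuming the glibc struct flock64
 * and F_SETLK64 definitions (a hypothetical snippet, not taken from a real
 * handler): always use the 64-bit command constants and struct flock64,
 * even on hosts where plain fcntl() would accept struct flock.
 */
static int example_lock_whole_file(int fd)
{
    struct flock64 fl = {
        .l_type   = F_WRLCK,
        .l_whence = SEEK_SET,
        .l_start  = 0,
        .l_len    = 0,          /* 0 means "lock to end of file" */
    };
    return safe_fcntl(fd, F_SETLK64, &fl);
}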
770 
771 static inline int host_to_target_sock_type(int host_type)
772 {
773     int target_type;
774 
775     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
776     case SOCK_DGRAM:
777         target_type = TARGET_SOCK_DGRAM;
778         break;
779     case SOCK_STREAM:
780         target_type = TARGET_SOCK_STREAM;
781         break;
782     default:
783         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
784         break;
785     }
786 
787 #if defined(SOCK_CLOEXEC)
788     if (host_type & SOCK_CLOEXEC) {
789         target_type |= TARGET_SOCK_CLOEXEC;
790     }
791 #endif
792 
793 #if defined(SOCK_NONBLOCK)
794     if (host_type & SOCK_NONBLOCK) {
795         target_type |= TARGET_SOCK_NONBLOCK;
796     }
797 #endif
798 
799     return target_type;
800 }
801 
802 static abi_ulong target_brk;
803 static abi_ulong brk_page;
804 
805 void target_set_brk(abi_ulong new_brk)
806 {
807     target_brk = new_brk;
808     brk_page = HOST_PAGE_ALIGN(target_brk);
809 }
810 
811 /* do_brk() must return target values and target errnos. */
812 abi_long do_brk(abi_ulong brk_val)
813 {
814     abi_long mapped_addr;
815     abi_ulong new_alloc_size;
816     abi_ulong new_brk, new_host_brk_page;
817 
818     /* brk pointers are always untagged */
819 
820     /* return old brk value if brk_val unchanged or zero */
821     if (!brk_val || brk_val == target_brk) {
822         return target_brk;
823     }
824 
825     new_brk = TARGET_PAGE_ALIGN(brk_val);
826     new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
827 
828     /* brk_val and old target_brk might be on the same page */
829     if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
830         if (brk_val > target_brk) {
831             /* empty remaining bytes in (possibly larger) host page */
832             memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
833         }
834         target_brk = brk_val;
835         return target_brk;
836     }
837 
838     /* Release heap if necessary */
839     if (new_brk < target_brk) {
840         /* empty remaining bytes in (possibly larger) host page */
841         memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
842 
843         /* free unused host pages and set new brk_page */
844         target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
845         brk_page = new_host_brk_page;
846 
847         target_brk = brk_val;
848         return target_brk;
849     }
850 
851     /* We need to allocate more memory after the brk... Note that
852      * we don't use MAP_FIXED because that will map over the top of
853      * any existing mapping (like the one with the host libc or qemu
854      * itself); instead we treat "mapped but at wrong address" as
855      * a failure and unmap again.
856      */
857     new_alloc_size = new_host_brk_page - brk_page;
858     if (new_alloc_size) {
859         mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
860                                         PROT_READ|PROT_WRITE,
861                                         MAP_ANON|MAP_PRIVATE, 0, 0));
862     } else {
863         mapped_addr = brk_page;
864     }
865 
866     if (mapped_addr == brk_page) {
867         /* Heap contents are initialized to zero, as for anonymous
868          * mapped pages.  Technically the new pages are already
869          * initialized to zero since they *are* anonymous mapped
870          * pages, however we have to take care with the contents that
871          * pages; however, we have to take care with the contents that
872          * come from the remaining part of the previous page: it may
873          * contain garbage data due to previous heap usage (grown
874          * then shrunk).  */
875 
876         target_brk = brk_val;
877         brk_page = new_host_brk_page;
878         return target_brk;
879     } else if (mapped_addr != -1) {
880         /* Mapped but at wrong address, meaning there wasn't actually
881          * enough space for this brk.
882          */
883         target_munmap(mapped_addr, new_alloc_size);
884         mapped_addr = -1;
885     }
886 
887 #if defined(TARGET_ALPHA)
888     /* We (partially) emulate OSF/1 on Alpha, which requires we
889        return a proper errno, not an unchanged brk value.  */
890     return -TARGET_ENOMEM;
891 #endif
892     /* For everything else, return the previous break. */
893     return target_brk;
894 }
895 
896 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
897     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
898 static inline abi_long copy_from_user_fdset(fd_set *fds,
899                                             abi_ulong target_fds_addr,
900                                             int n)
901 {
902     int i, nw, j, k;
903     abi_ulong b, *target_fds;
904 
905     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
906     if (!(target_fds = lock_user(VERIFY_READ,
907                                  target_fds_addr,
908                                  sizeof(abi_ulong) * nw,
909                                  1)))
910         return -TARGET_EFAULT;
911 
912     FD_ZERO(fds);
913     k = 0;
914     for (i = 0; i < nw; i++) {
915         /* grab the abi_ulong */
916         __get_user(b, &target_fds[i]);
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             /* check the bit inside the abi_ulong */
919             if ((b >> j) & 1)
920                 FD_SET(k, fds);
921             k++;
922         }
923     }
924 
925     unlock_user(target_fds, target_fds_addr, 0);
926 
927     return 0;
928 }
929 
930 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
931                                                  abi_ulong target_fds_addr,
932                                                  int n)
933 {
934     if (target_fds_addr) {
935         if (copy_from_user_fdset(fds, target_fds_addr, n))
936             return -TARGET_EFAULT;
937         *fds_ptr = fds;
938     } else {
939         *fds_ptr = NULL;
940     }
941     return 0;
942 }
943 
944 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
945                                           const fd_set *fds,
946                                           int n)
947 {
948     int i, nw, j, k;
949     abi_long v;
950     abi_ulong *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_WRITE,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  0)))
957         return -TARGET_EFAULT;
958 
959     k = 0;
960     for (i = 0; i < nw; i++) {
961         v = 0;
962         for (j = 0; j < TARGET_ABI_BITS; j++) {
963             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
964             k++;
965         }
966         __put_user(v, &target_fds[i]);
967     }
968 
969     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
970 
971     return 0;
972 }
973 #endif
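
/*
 * Worked example of the layout handled above: with TARGET_ABI_BITS == 32
 * and n == 70, nw = DIV_ROUND_UP(70, 32) = 3 abi_ulongs are copied, and
 * guest fd 33 lives in word 1, bit 1 (33 = 1 * 32 + 1).  __get_user() and
 * __put_user() handle any byte-swapping between guest and host order.
 */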
974 
975 #if defined(__alpha__)
976 #define HOST_HZ 1024
977 #else
978 #define HOST_HZ 100
979 #endif
980 
981 static inline abi_long host_to_target_clock_t(long ticks)
982 {
983 #if HOST_HZ == TARGET_HZ
984     return ticks;
985 #else
986     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
987 #endif
988 }
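
/*
 * For example, with HOST_HZ 100 and a guest ABI where TARGET_HZ is 1024,
 * 250 host ticks are reported to the guest as 250 * 1024 / 100 = 2560.
 */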
989 
990 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
991                                              const struct rusage *rusage)
992 {
993     struct target_rusage *target_rusage;
994 
995     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
996         return -TARGET_EFAULT;
997     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
998     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
999     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
1000     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
1001     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
1002     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
1003     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1004     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1005     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1006     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1007     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1008     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1009     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1010     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1011     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1012     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1013     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1014     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1015     unlock_user_struct(target_rusage, target_addr, 1);
1016 
1017     return 0;
1018 }
1019 
1020 #ifdef TARGET_NR_setrlimit
1021 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1022 {
1023     abi_ulong target_rlim_swap;
1024     rlim_t result;
1025 
1026     target_rlim_swap = tswapal(target_rlim);
1027     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1028         return RLIM_INFINITY;
1029 
1030     result = target_rlim_swap;
1031     if (target_rlim_swap != (rlim_t)result)
1032         return RLIM_INFINITY;
1033 
1034     return result;
1035 }
1036 #endif
1037 
1038 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1039 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1040 {
1041     abi_ulong target_rlim_swap;
1042     abi_ulong result;
1043 
1044     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1045         target_rlim_swap = TARGET_RLIM_INFINITY;
1046     else
1047         target_rlim_swap = rlim;
1048     result = tswapal(target_rlim_swap);
1049 
1050     return result;
1051 }
1052 #endif
1053 
1054 static inline int target_to_host_resource(int code)
1055 {
1056     switch (code) {
1057     case TARGET_RLIMIT_AS:
1058         return RLIMIT_AS;
1059     case TARGET_RLIMIT_CORE:
1060         return RLIMIT_CORE;
1061     case TARGET_RLIMIT_CPU:
1062         return RLIMIT_CPU;
1063     case TARGET_RLIMIT_DATA:
1064         return RLIMIT_DATA;
1065     case TARGET_RLIMIT_FSIZE:
1066         return RLIMIT_FSIZE;
1067     case TARGET_RLIMIT_LOCKS:
1068         return RLIMIT_LOCKS;
1069     case TARGET_RLIMIT_MEMLOCK:
1070         return RLIMIT_MEMLOCK;
1071     case TARGET_RLIMIT_MSGQUEUE:
1072         return RLIMIT_MSGQUEUE;
1073     case TARGET_RLIMIT_NICE:
1074         return RLIMIT_NICE;
1075     case TARGET_RLIMIT_NOFILE:
1076         return RLIMIT_NOFILE;
1077     case TARGET_RLIMIT_NPROC:
1078         return RLIMIT_NPROC;
1079     case TARGET_RLIMIT_RSS:
1080         return RLIMIT_RSS;
1081     case TARGET_RLIMIT_RTPRIO:
1082         return RLIMIT_RTPRIO;
1083 #ifdef RLIMIT_RTTIME
1084     case TARGET_RLIMIT_RTTIME:
1085         return RLIMIT_RTTIME;
1086 #endif
1087     case TARGET_RLIMIT_SIGPENDING:
1088         return RLIMIT_SIGPENDING;
1089     case TARGET_RLIMIT_STACK:
1090         return RLIMIT_STACK;
1091     default:
1092         return code;
1093     }
1094 }
1095 
1096 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1097                                               abi_ulong target_tv_addr)
1098 {
1099     struct target_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 
1113 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1114                                             const struct timeval *tv)
1115 {
1116     struct target_timeval *target_tv;
1117 
1118     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1119         return -TARGET_EFAULT;
1120     }
1121 
1122     __put_user(tv->tv_sec, &target_tv->tv_sec);
1123     __put_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 1);
1126 
1127     return 0;
1128 }
1129 
1130 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1131 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1132                                                 abi_ulong target_tv_addr)
1133 {
1134     struct target__kernel_sock_timeval *target_tv;
1135 
1136     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1137         return -TARGET_EFAULT;
1138     }
1139 
1140     __get_user(tv->tv_sec, &target_tv->tv_sec);
1141     __get_user(tv->tv_usec, &target_tv->tv_usec);
1142 
1143     unlock_user_struct(target_tv, target_tv_addr, 0);
1144 
1145     return 0;
1146 }
1147 #endif
1148 
1149 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1150                                               const struct timeval *tv)
1151 {
1152     struct target__kernel_sock_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
1166 #if defined(TARGET_NR_futex) || \
1167     defined(TARGET_NR_rt_sigtimedwait) || \
1168     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1169     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1170     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1171     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1172     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1173     defined(TARGET_NR_timer_settime) || \
1174     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1175 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1176                                                abi_ulong target_addr)
1177 {
1178     struct target_timespec *target_ts;
1179 
1180     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1181         return -TARGET_EFAULT;
1182     }
1183     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1184     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1185     unlock_user_struct(target_ts, target_addr, 0);
1186     return 0;
1187 }
1188 #endif
1189 
1190 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1191     defined(TARGET_NR_timer_settime64) || \
1192     defined(TARGET_NR_mq_timedsend_time64) || \
1193     defined(TARGET_NR_mq_timedreceive_time64) || \
1194     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1195     defined(TARGET_NR_clock_nanosleep_time64) || \
1196     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1197     defined(TARGET_NR_utimensat) || \
1198     defined(TARGET_NR_utimensat_time64) || \
1199     defined(TARGET_NR_semtimedop_time64) || \
1200     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1201 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1202                                                  abi_ulong target_addr)
1203 {
1204     struct target__kernel_timespec *target_ts;
1205 
1206     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1207         return -TARGET_EFAULT;
1208     }
1209     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1210     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1211     /* in 32bit mode, this drops the padding */
1212     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1213     unlock_user_struct(target_ts, target_addr, 0);
1214     return 0;
1215 }
1216 #endif
1217 
1218 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1219                                                struct timespec *host_ts)
1220 {
1221     struct target_timespec *target_ts;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1227     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1228     unlock_user_struct(target_ts, target_addr, 1);
1229     return 0;
1230 }
1231 
1232 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1233                                                  struct timespec *host_ts)
1234 {
1235     struct target__kernel_timespec *target_ts;
1236 
1237     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1238         return -TARGET_EFAULT;
1239     }
1240     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1241     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1242     unlock_user_struct(target_ts, target_addr, 1);
1243     return 0;
1244 }
1245 
1246 #if defined(TARGET_NR_gettimeofday)
1247 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1248                                              struct timezone *tz)
1249 {
1250     struct target_timezone *target_tz;
1251 
1252     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1253         return -TARGET_EFAULT;
1254     }
1255 
1256     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1257     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1258 
1259     unlock_user_struct(target_tz, target_tz_addr, 1);
1260 
1261     return 0;
1262 }
1263 #endif
1264 
1265 #if defined(TARGET_NR_settimeofday)
1266 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1267                                                abi_ulong target_tz_addr)
1268 {
1269     struct target_timezone *target_tz;
1270 
1271     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1272         return -TARGET_EFAULT;
1273     }
1274 
1275     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1276     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1277 
1278     unlock_user_struct(target_tz, target_tz_addr, 0);
1279 
1280     return 0;
1281 }
1282 #endif
1283 
1284 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1285 #include <mqueue.h>
1286 
1287 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1288                                               abi_ulong target_mq_attr_addr)
1289 {
1290     struct target_mq_attr *target_mq_attr;
1291 
1292     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1293                           target_mq_attr_addr, 1))
1294         return -TARGET_EFAULT;
1295 
1296     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1297     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1298     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1299     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1300 
1301     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1302 
1303     return 0;
1304 }
1305 
1306 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1307                                             const struct mq_attr *attr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1312                           target_mq_attr_addr, 0))
1313         return -TARGET_EFAULT;
1314 
1315     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1327 /* do_select() must return target values and target errnos. */
1328 static abi_long do_select(int n,
1329                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1330                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1331 {
1332     fd_set rfds, wfds, efds;
1333     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1334     struct timeval tv;
1335     struct timespec ts, *ts_ptr;
1336     abi_long ret;
1337 
1338     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1339     if (ret) {
1340         return ret;
1341     }
1342     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1343     if (ret) {
1344         return ret;
1345     }
1346     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1347     if (ret) {
1348         return ret;
1349     }
1350 
1351     if (target_tv_addr) {
1352         if (copy_from_user_timeval(&tv, target_tv_addr))
1353             return -TARGET_EFAULT;
1354         ts.tv_sec = tv.tv_sec;
1355         ts.tv_nsec = tv.tv_usec * 1000;
1356         ts_ptr = &ts;
1357     } else {
1358         ts_ptr = NULL;
1359     }
1360 
1361     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1362                                   ts_ptr, NULL));
1363 
1364     if (!is_error(ret)) {
1365         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1366             return -TARGET_EFAULT;
1367         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1368             return -TARGET_EFAULT;
1369         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1370             return -TARGET_EFAULT;
1371 
1372         if (target_tv_addr) {
1373             tv.tv_sec = ts.tv_sec;
1374             tv.tv_usec = ts.tv_nsec / 1000;
1375             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1376                 return -TARGET_EFAULT;
1377             }
1378         }
1379     }
1380 
1381     return ret;
1382 }
1383 
1384 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1385 static abi_long do_old_select(abi_ulong arg1)
1386 {
1387     struct target_sel_arg_struct *sel;
1388     abi_ulong inp, outp, exp, tvp;
1389     long nsel;
1390 
1391     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1392         return -TARGET_EFAULT;
1393     }
1394 
1395     nsel = tswapal(sel->n);
1396     inp = tswapal(sel->inp);
1397     outp = tswapal(sel->outp);
1398     exp = tswapal(sel->exp);
1399     tvp = tswapal(sel->tvp);
1400 
1401     unlock_user_struct(sel, arg1, 0);
1402 
1403     return do_select(nsel, inp, outp, exp, tvp);
1404 }
1405 #endif
1406 #endif
1407 
1408 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1409 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1410                             abi_long arg4, abi_long arg5, abi_long arg6,
1411                             bool time64)
1412 {
1413     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1414     fd_set rfds, wfds, efds;
1415     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1416     struct timespec ts, *ts_ptr;
1417     abi_long ret;
1418 
1419     /*
1420      * The 6th arg is actually two args smashed together,
1421      * so we cannot use the C library.
1422      */
1423     struct {
1424         sigset_t *set;
1425         size_t size;
1426     } sig, *sig_ptr;
1427 
1428     abi_ulong arg_sigset, arg_sigsize, *arg7;
1429 
1430     n = arg1;
1431     rfd_addr = arg2;
1432     wfd_addr = arg3;
1433     efd_addr = arg4;
1434     ts_addr = arg5;
1435 
1436     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1437     if (ret) {
1438         return ret;
1439     }
1440     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1441     if (ret) {
1442         return ret;
1443     }
1444     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1445     if (ret) {
1446         return ret;
1447     }
1448 
1449     /*
1450      * This takes a timespec, and not a timeval, so we cannot
1451      * use the do_select() helper ...
1452      */
1453     if (ts_addr) {
1454         if (time64) {
1455             if (target_to_host_timespec64(&ts, ts_addr)) {
1456                 return -TARGET_EFAULT;
1457             }
1458         } else {
1459             if (target_to_host_timespec(&ts, ts_addr)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         }
1463         ts_ptr = &ts;
1464     } else {
1465         ts_ptr = NULL;
1466     }
1467 
1468     /* Extract the two packed args for the sigset */
1469     sig_ptr = NULL;
1470     if (arg6) {
1471         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1472         if (!arg7) {
1473             return -TARGET_EFAULT;
1474         }
1475         arg_sigset = tswapal(arg7[0]);
1476         arg_sigsize = tswapal(arg7[1]);
1477         unlock_user(arg7, arg6, 0);
1478 
1479         if (arg_sigset) {
1480             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1481             if (ret != 0) {
1482                 return ret;
1483             }
1484             sig_ptr = &sig;
1485             sig.size = SIGSET_T_SIZE;
1486         }
1487     }
1488 
1489     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1490                                   ts_ptr, sig_ptr));
1491 
1492     if (sig_ptr) {
1493         finish_sigsuspend_mask(ret);
1494     }
1495 
1496     if (!is_error(ret)) {
1497         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1501             return -TARGET_EFAULT;
1502         }
1503         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1504             return -TARGET_EFAULT;
1505         }
1506         if (time64) {
1507             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1508                 return -TARGET_EFAULT;
1509             }
1510         } else {
1511             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1512                 return -TARGET_EFAULT;
1513             }
1514         }
1515     }
1516     return ret;
1517 }
1518 #endif
1519 
1520 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1521     defined(TARGET_NR_ppoll_time64)
1522 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1523                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1524 {
1525     struct target_pollfd *target_pfd;
1526     unsigned int nfds = arg2;
1527     struct pollfd *pfd;
1528     unsigned int i;
1529     abi_long ret;
1530 
1531     pfd = NULL;
1532     target_pfd = NULL;
1533     if (nfds) {
1534         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1535             return -TARGET_EINVAL;
1536         }
1537         target_pfd = lock_user(VERIFY_WRITE, arg1,
1538                                sizeof(struct target_pollfd) * nfds, 1);
1539         if (!target_pfd) {
1540             return -TARGET_EFAULT;
1541         }
1542 
1543         pfd = alloca(sizeof(struct pollfd) * nfds);
1544         for (i = 0; i < nfds; i++) {
1545             pfd[i].fd = tswap32(target_pfd[i].fd);
1546             pfd[i].events = tswap16(target_pfd[i].events);
1547         }
1548     }
1549     if (ppoll) {
1550         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1551         sigset_t *set = NULL;
1552 
1553         if (arg3) {
1554             if (time64) {
1555                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             } else {
1560                 if (target_to_host_timespec(timeout_ts, arg3)) {
1561                     unlock_user(target_pfd, arg1, 0);
1562                     return -TARGET_EFAULT;
1563                 }
1564             }
1565         } else {
1566             timeout_ts = NULL;
1567         }
1568 
1569         if (arg4) {
1570             ret = process_sigsuspend_mask(&set, arg4, arg5);
1571             if (ret != 0) {
1572                 unlock_user(target_pfd, arg1, 0);
1573                 return ret;
1574             }
1575         }
1576 
1577         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1578                                    set, SIGSET_T_SIZE));
1579 
1580         if (set) {
1581             finish_sigsuspend_mask(ret);
1582         }
1583         if (!is_error(ret) && arg3) {
1584             if (time64) {
1585                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1586                     return -TARGET_EFAULT;
1587                 }
1588             } else {
1589                 if (host_to_target_timespec(arg3, timeout_ts)) {
1590                     return -TARGET_EFAULT;
1591                 }
1592             }
1593         }
1594     } else {
1595         struct timespec ts, *pts;
1596 
1597         if (arg3 >= 0) {
1598             /* Convert ms to secs, ns */
1599             ts.tv_sec = arg3 / 1000;
1600             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1601             pts = &ts;
1602         } else {
1603             /* A negative poll() timeout means "infinite" */
1604             pts = NULL;
1605         }
1606         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1607     }
1608 
1609     if (!is_error(ret)) {
1610         for (i = 0; i < nfds; i++) {
1611             target_pfd[i].revents = tswap16(pfd[i].revents);
1612         }
1613     }
1614     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1615     return ret;
1616 }
1617 #endif
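
/*
 * Illustrative sketch, not part of the original file: the plain poll() path
 * above converts a millisecond timeout into a struct timespec before calling
 * ppoll, treating a negative timeout as "wait forever".  A user-space
 * equivalent of that conversion (function name invented for the example)
 * looks like this:
 */
static G_GNUC_UNUSED int example_poll_ms_via_ppoll(struct pollfd *fds,
                                                   nfds_t nfds, int timeout_ms)
{
    struct timespec ts, *pts = NULL;

    if (timeout_ms >= 0) {
        ts.tv_sec = timeout_ms / 1000;
        ts.tv_nsec = (timeout_ms % 1000) * 1000000LL;
        pts = &ts;
    }

    /* NULL sigmask: behave exactly like poll(), just with ns resolution. */
    return ppoll(fds, nfds, pts, NULL);
}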
1618 
1619 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1620                         int flags, int is_pipe2)
1621 {
1622     int host_pipe[2];
1623     abi_long ret;
1624     ret = pipe2(host_pipe, flags);
1625 
1626     if (is_error(ret))
1627         return get_errno(ret);
1628 
1629     /* Several targets have special calling conventions for the original
1630        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1631     if (!is_pipe2) {
1632 #if defined(TARGET_ALPHA)
1633         cpu_env->ir[IR_A4] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_MIPS)
1636         cpu_env->active_tc.gpr[3] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SH4)
1639         cpu_env->gregs[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SPARC)
1642         cpu_env->regwptr[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #endif
1645     }
1646 
1647     if (put_user_s32(host_pipe[0], pipedes)
1648         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1649         return -TARGET_EFAULT;
1650     return get_errno(ret);
1651 }
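
/*
 * Illustrative sketch, not part of the original file: on the targets listed
 * above the historical pipe() ABI returns the second descriptor in a second
 * result register, so nothing is written through the guest pointer at all.
 * Every other target sees the ordinary behaviour, i.e. both descriptors are
 * stored back-to-back at pipedes, and a guest program (function name
 * invented for the example) simply does:
 */
static G_GNUC_UNUSED int example_guest_pipe_usage(void)
{
    int fds[2];                 /* fds[0]: read end, fds[1]: write end */
    char buf[4];
    int ret = 0;

    if (pipe2(fds, O_CLOEXEC) < 0) {
        return -1;
    }
    if (write(fds[1], "hi", 2) != 2 || read(fds[0], buf, sizeof(buf)) != 2) {
        ret = -1;
    }
    close(fds[0]);
    close(fds[1]);
    return ret;
}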
1652 
1653 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1654                                               abi_ulong target_addr,
1655                                               socklen_t len)
1656 {
1657     struct target_ip_mreqn *target_smreqn;
1658 
1659     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_smreqn)
1661         return -TARGET_EFAULT;
1662     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1663     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1664     if (len == sizeof(struct target_ip_mreqn))
1665         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1666     unlock_user(target_smreqn, target_addr, 0);
1667 
1668     return 0;
1669 }
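
/*
 * Illustrative sketch, not part of the original file: the helper above is
 * length-driven because IP_ADD_MEMBERSHIP accepts either the short ip_mreq
 * layout (group + local address) or the longer ip_mreqn layout that adds an
 * interface index; only the latter has imr_ifindex to convert.  A guest-side
 * caller using the longer form (function name invented for the example):
 */
static G_GNUC_UNUSED int example_join_multicast_group(int sock,
                                                      struct in_addr group,
                                                      int ifindex)
{
    struct ip_mreqn mreqn = {
        .imr_multiaddr = group,
        .imr_ifindex = ifindex,     /* field that plain ip_mreq lacks */
    };

    mreqn.imr_address.s_addr = htonl(INADDR_ANY);
    return setsockopt(sock, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                      &mreqn, sizeof(mreqn));
}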
1670 
1671 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1672                                                abi_ulong target_addr,
1673                                                socklen_t len)
1674 {
1675     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1676     sa_family_t sa_family;
1677     struct target_sockaddr *target_saddr;
1678 
1679     if (fd_trans_target_to_host_addr(fd)) {
1680         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1681     }
1682 
1683     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1684     if (!target_saddr)
1685         return -TARGET_EFAULT;
1686 
1687     sa_family = tswap16(target_saddr->sa_family);
1688 
1689     /* Oops. The caller might send an incomplete sun_path; sun_path
1690      * must be terminated by \0 (see the manual page), but
1691      * unfortunately it is quite common to specify the sockaddr_un
1692      * length as "strlen(x->sun_path)" when it should be
1693      * "strlen(...) + 1". We fix that up here if needed.
1694      * The Linux kernel applies a similar fix-up.
1695      */
1696 
1697     if (sa_family == AF_UNIX) {
1698         if (len < unix_maxlen && len > 0) {
1699             char *cp = (char *)target_saddr;
1700 
1701             if (cp[len - 1] && !cp[len])
1702                 len++;
1703         }
1704         if (len > unix_maxlen)
1705             len = unix_maxlen;
1706     }
1707 
1708     memcpy(addr, target_saddr, len);
1709     addr->sa_family = sa_family;
1710     if (sa_family == AF_NETLINK) {
1711         struct sockaddr_nl *nladdr;
1712 
1713         nladdr = (struct sockaddr_nl *)addr;
1714         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1715         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1716     } else if (sa_family == AF_PACKET) {
1717         struct target_sockaddr_ll *lladdr;
1718 
1719         lladdr = (struct target_sockaddr_ll *)addr;
1720         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1721         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1722     } else if (sa_family == AF_INET6) {
1723         struct sockaddr_in6 *in6addr;
1724 
1725         in6addr = (struct sockaddr_in6 *)addr;
1726         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1727     }
1728     unlock_user(target_saddr, target_addr, 0);
1729 
1730     return 0;
1731 }
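
/*
 * Illustrative sketch, not part of the original file: the AF_UNIX length
 * fix-up above exists because callers commonly compute the address length
 * without the trailing NUL of sun_path, as in the sloppy-but-widespread
 * pattern below (function name invented for the example):
 */
static G_GNUC_UNUSED int example_unix_connect(int sock, const char *path)
{
    struct sockaddr_un sun;
    socklen_t len;

    memset(&sun, 0, sizeof(sun));
    sun.sun_family = AF_UNIX;
    snprintf(sun.sun_path, sizeof(sun.sun_path), "%s", path);

    /* Omits the '\0'; the conversion above quietly extends len by one. */
    len = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path);

    return connect(sock, (struct sockaddr *)&sun, len);
}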
1732 
1733 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1734                                                struct sockaddr *addr,
1735                                                socklen_t len)
1736 {
1737     struct target_sockaddr *target_saddr;
1738 
1739     if (len == 0) {
1740         return 0;
1741     }
1742     assert(addr);
1743 
1744     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1745     if (!target_saddr)
1746         return -TARGET_EFAULT;
1747     memcpy(target_saddr, addr, len);
1748     if (len >= offsetof(struct target_sockaddr, sa_family) +
1749         sizeof(target_saddr->sa_family)) {
1750         target_saddr->sa_family = tswap16(addr->sa_family);
1751     }
1752     if (addr->sa_family == AF_NETLINK &&
1753         len >= sizeof(struct target_sockaddr_nl)) {
1754         struct target_sockaddr_nl *target_nl =
1755                (struct target_sockaddr_nl *)target_saddr;
1756         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1757         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1758     } else if (addr->sa_family == AF_PACKET) {
1759         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1760         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1761         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1762     } else if (addr->sa_family == AF_INET6 &&
1763                len >= sizeof(struct target_sockaddr_in6)) {
1764         struct target_sockaddr_in6 *target_in6 =
1765                (struct target_sockaddr_in6 *)target_saddr;
1766         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1767     }
1768     unlock_user(target_saddr, target_addr, len);
1769 
1770     return 0;
1771 }
1772 
1773 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1774                                            struct target_msghdr *target_msgh)
1775 {
1776     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1777     abi_long msg_controllen;
1778     abi_ulong target_cmsg_addr;
1779     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1780     socklen_t space = 0;
1781 
1782     msg_controllen = tswapal(target_msgh->msg_controllen);
1783     if (msg_controllen < sizeof (struct target_cmsghdr))
1784         goto the_end;
1785     target_cmsg_addr = tswapal(target_msgh->msg_control);
1786     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1787     target_cmsg_start = target_cmsg;
1788     if (!target_cmsg)
1789         return -TARGET_EFAULT;
1790 
1791     while (cmsg && target_cmsg) {
1792         void *data = CMSG_DATA(cmsg);
1793         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1794 
1795         int len = tswapal(target_cmsg->cmsg_len)
1796             - sizeof(struct target_cmsghdr);
1797 
1798         space += CMSG_SPACE(len);
1799         if (space > msgh->msg_controllen) {
1800             space -= CMSG_SPACE(len);
1801             /* This is a QEMU bug, since we allocated the payload
1802              * area ourselves (unlike overflow in host-to-target
1803              * conversion, which is just the guest giving us a buffer
1804              * that's too small). It can't happen for the payload types
1805              * we currently support; if it becomes an issue in future
1806              * we would need to improve our allocation strategy to
1807              * something more intelligent than "twice the size of the
1808              * target buffer we're reading from".
1809              */
1810             qemu_log_mask(LOG_UNIMP,
1811                           ("Unsupported ancillary data %d/%d: "
1812                            "unhandled msg size\n"),
1813                           tswap32(target_cmsg->cmsg_level),
1814                           tswap32(target_cmsg->cmsg_type));
1815             break;
1816         }
1817 
1818         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1819             cmsg->cmsg_level = SOL_SOCKET;
1820         } else {
1821             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1822         }
1823         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1824         cmsg->cmsg_len = CMSG_LEN(len);
1825 
1826         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1827             int *fd = (int *)data;
1828             int *target_fd = (int *)target_data;
1829             int i, numfds = len / sizeof(int);
1830 
1831             for (i = 0; i < numfds; i++) {
1832                 __get_user(fd[i], target_fd + i);
1833             }
1834         } else if (cmsg->cmsg_level == SOL_SOCKET
1835                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1836             struct ucred *cred = (struct ucred *)data;
1837             struct target_ucred *target_cred =
1838                 (struct target_ucred *)target_data;
1839 
1840             __get_user(cred->pid, &target_cred->pid);
1841             __get_user(cred->uid, &target_cred->uid);
1842             __get_user(cred->gid, &target_cred->gid);
1843         } else if (cmsg->cmsg_level == SOL_ALG) {
1844             uint32_t *dst = (uint32_t *)data;
1845 
1846             memcpy(dst, target_data, len);
1847             /* fix endianness of the first 32-bit word */
1848             if (len >= sizeof(uint32_t)) {
1849                 *dst = tswap32(*dst);
1850             }
1851         } else {
1852             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1853                           cmsg->cmsg_level, cmsg->cmsg_type);
1854             memcpy(data, target_data, len);
1855         }
1856 
1857         cmsg = CMSG_NXTHDR(msgh, cmsg);
1858         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1859                                          target_cmsg_start);
1860     }
1861     unlock_user(target_cmsg, target_cmsg_addr, 0);
1862  the_end:
1863     msgh->msg_controllen = space;
1864     return 0;
1865 }
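
/*
 * Illustrative sketch, not part of the original file: the SCM_RIGHTS branch
 * above byte-swaps an array of ints that the guest placed in a control
 * message.  The guest-side sendmsg() that produces such a message (function
 * name invented for the example) is typically built like this:
 */
static G_GNUC_UNUSED ssize_t example_send_fd(int sock, int fd_to_pass)
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;       /* guarantees cmsghdr alignment */
    } control;
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control.buf,
        .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg;

    memset(control.buf, 0, sizeof(control.buf));
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    return sendmsg(sock, &msg, 0);
}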
1866 
1867 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1868                                            struct msghdr *msgh)
1869 {
1870     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1871     abi_long msg_controllen;
1872     abi_ulong target_cmsg_addr;
1873     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1874     socklen_t space = 0;
1875 
1876     msg_controllen = tswapal(target_msgh->msg_controllen);
1877     if (msg_controllen < sizeof (struct target_cmsghdr))
1878         goto the_end;
1879     target_cmsg_addr = tswapal(target_msgh->msg_control);
1880     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1881     target_cmsg_start = target_cmsg;
1882     if (!target_cmsg)
1883         return -TARGET_EFAULT;
1884 
1885     while (cmsg && target_cmsg) {
1886         void *data = CMSG_DATA(cmsg);
1887         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1888 
1889         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1890         int tgt_len, tgt_space;
1891 
1892         /* We never copy a half-header but may copy half-data;
1893          * this is Linux's behaviour in put_cmsg(). Note that
1894          * truncation here is a guest problem (which we report
1895          * to the guest via the CTRUNC bit), unlike truncation
1896          * in target_to_host_cmsg, which is a QEMU bug.
1897          */
1898         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1899             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1900             break;
1901         }
1902 
1903         if (cmsg->cmsg_level == SOL_SOCKET) {
1904             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1905         } else {
1906             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1907         }
1908         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1909 
1910         /* Payload types which need a different size of payload on
1911          * the target must adjust tgt_len here.
1912          */
1913         tgt_len = len;
1914         switch (cmsg->cmsg_level) {
1915         case SOL_SOCKET:
1916             switch (cmsg->cmsg_type) {
1917             case SO_TIMESTAMP:
1918                 tgt_len = sizeof(struct target_timeval);
1919                 break;
1920             default:
1921                 break;
1922             }
1923             break;
1924         default:
1925             break;
1926         }
1927 
1928         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1929             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1930             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1931         }
1932 
1933         /* We must now copy-and-convert len bytes of payload
1934          * into tgt_len bytes of destination space. Bear in mind
1935          * that in both source and destination we may be dealing
1936          * with a truncated value!
1937          */
1938         switch (cmsg->cmsg_level) {
1939         case SOL_SOCKET:
1940             switch (cmsg->cmsg_type) {
1941             case SCM_RIGHTS:
1942             {
1943                 int *fd = (int *)data;
1944                 int *target_fd = (int *)target_data;
1945                 int i, numfds = tgt_len / sizeof(int);
1946 
1947                 for (i = 0; i < numfds; i++) {
1948                     __put_user(fd[i], target_fd + i);
1949                 }
1950                 break;
1951             }
1952             case SO_TIMESTAMP:
1953             {
1954                 struct timeval *tv = (struct timeval *)data;
1955                 struct target_timeval *target_tv =
1956                     (struct target_timeval *)target_data;
1957 
1958                 if (len != sizeof(struct timeval) ||
1959                     tgt_len != sizeof(struct target_timeval)) {
1960                     goto unimplemented;
1961                 }
1962 
1963                 /* copy struct timeval to target */
1964                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1965                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1966                 break;
1967             }
1968             case SCM_CREDENTIALS:
1969             {
1970                 struct ucred *cred = (struct ucred *)data;
1971                 struct target_ucred *target_cred =
1972                     (struct target_ucred *)target_data;
1973 
1974                 __put_user(cred->pid, &target_cred->pid);
1975                 __put_user(cred->uid, &target_cred->uid);
1976                 __put_user(cred->gid, &target_cred->gid);
1977                 break;
1978             }
1979             default:
1980                 goto unimplemented;
1981             }
1982             break;
1983 
1984         case SOL_IP:
1985             switch (cmsg->cmsg_type) {
1986             case IP_TTL:
1987             {
1988                 uint32_t *v = (uint32_t *)data;
1989                 uint32_t *t_int = (uint32_t *)target_data;
1990 
1991                 if (len != sizeof(uint32_t) ||
1992                     tgt_len != sizeof(uint32_t)) {
1993                     goto unimplemented;
1994                 }
1995                 __put_user(*v, t_int);
1996                 break;
1997             }
1998             case IP_RECVERR:
1999             {
2000                 struct errhdr_t {
2001                    struct sock_extended_err ee;
2002                    struct sockaddr_in offender;
2003                 };
2004                 struct errhdr_t *errh = (struct errhdr_t *)data;
2005                 struct errhdr_t *target_errh =
2006                     (struct errhdr_t *)target_data;
2007 
2008                 if (len != sizeof(struct errhdr_t) ||
2009                     tgt_len != sizeof(struct errhdr_t)) {
2010                     goto unimplemented;
2011                 }
2012                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2013                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2014                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2015                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2016                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2017                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2018                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2019                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2020                     (void *) &errh->offender, sizeof(errh->offender));
2021                 break;
2022             }
2023             default:
2024                 goto unimplemented;
2025             }
2026             break;
2027 
2028         case SOL_IPV6:
2029             switch (cmsg->cmsg_type) {
2030             case IPV6_HOPLIMIT:
2031             {
2032                 uint32_t *v = (uint32_t *)data;
2033                 uint32_t *t_int = (uint32_t *)target_data;
2034 
2035                 if (len != sizeof(uint32_t) ||
2036                     tgt_len != sizeof(uint32_t)) {
2037                     goto unimplemented;
2038                 }
2039                 __put_user(*v, t_int);
2040                 break;
2041             }
2042             case IPV6_RECVERR:
2043             {
2044                 struct errhdr6_t {
2045                    struct sock_extended_err ee;
2046                    struct sockaddr_in6 offender;
2047                 };
2048                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2049                 struct errhdr6_t *target_errh =
2050                     (struct errhdr6_t *)target_data;
2051 
2052                 if (len != sizeof(struct errhdr6_t) ||
2053                     tgt_len != sizeof(struct errhdr6_t)) {
2054                     goto unimplemented;
2055                 }
2056                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2057                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2058                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2059                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2060                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2061                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2062                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2063                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2064                     (void *) &errh->offender, sizeof(errh->offender));
2065                 break;
2066             }
2067             default:
2068                 goto unimplemented;
2069             }
2070             break;
2071 
2072         default:
2073         unimplemented:
2074             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2075                           cmsg->cmsg_level, cmsg->cmsg_type);
2076             memcpy(target_data, data, MIN(len, tgt_len));
2077             if (tgt_len > len) {
2078                 memset(target_data + len, 0, tgt_len - len);
2079             }
2080         }
2081 
2082         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2083         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2084         if (msg_controllen < tgt_space) {
2085             tgt_space = msg_controllen;
2086         }
2087         msg_controllen -= tgt_space;
2088         space += tgt_space;
2089         cmsg = CMSG_NXTHDR(msgh, cmsg);
2090         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2091                                          target_cmsg_start);
2092     }
2093     unlock_user(target_cmsg, target_cmsg_addr, space);
2094  the_end:
2095     target_msgh->msg_controllen = tswapal(space);
2096     return 0;
2097 }
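
/*
 * Illustrative sketch, not part of the original file: the SO_TIMESTAMP case
 * above resizes a host struct timeval into a target_timeval.  On the guest
 * side that payload is requested and consumed roughly like this (function
 * name invented for the example):
 */
static G_GNUC_UNUSED int example_recv_timestamp(int sock, struct timeval *stamp)
{
    int on = 1;
    char data[64];
    struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    union {
        char buf[CMSG_SPACE(sizeof(struct timeval))];
        struct cmsghdr align;
    } control;
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control.buf,
        .msg_controllen = sizeof(control.buf),
    };
    struct cmsghdr *cmsg;

    if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)) < 0 ||
        recvmsg(sock, &msg, 0) < 0) {
        return -1;
    }
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SO_TIMESTAMP) {
            memcpy(stamp, CMSG_DATA(cmsg), sizeof(*stamp));
            return 0;
        }
    }
    return -1;      /* datagram arrived without a timestamp cmsg */
}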
2098 
2099 /* do_setsockopt() must return target values and target errnos. */
2100 static abi_long do_setsockopt(int sockfd, int level, int optname,
2101                               abi_ulong optval_addr, socklen_t optlen)
2102 {
2103     abi_long ret;
2104     int val;
2105     struct ip_mreqn *ip_mreq;
2106     struct ip_mreq_source *ip_mreq_source;
2107 
2108     switch(level) {
2109     case SOL_TCP:
2110     case SOL_UDP:
2111         /* TCP and UDP options all take an 'int' value.  */
2112         if (optlen < sizeof(uint32_t))
2113             return -TARGET_EINVAL;
2114 
2115         if (get_user_u32(val, optval_addr))
2116             return -TARGET_EFAULT;
2117         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2118         break;
2119     case SOL_IP:
2120         switch(optname) {
2121         case IP_TOS:
2122         case IP_TTL:
2123         case IP_HDRINCL:
2124         case IP_ROUTER_ALERT:
2125         case IP_RECVOPTS:
2126         case IP_RETOPTS:
2127         case IP_PKTINFO:
2128         case IP_MTU_DISCOVER:
2129         case IP_RECVERR:
2130         case IP_RECVTTL:
2131         case IP_RECVTOS:
2132 #ifdef IP_FREEBIND
2133         case IP_FREEBIND:
2134 #endif
2135         case IP_MULTICAST_TTL:
2136         case IP_MULTICAST_LOOP:
2137             val = 0;
2138             if (optlen >= sizeof(uint32_t)) {
2139                 if (get_user_u32(val, optval_addr))
2140                     return -TARGET_EFAULT;
2141             } else if (optlen >= 1) {
2142                 if (get_user_u8(val, optval_addr))
2143                     return -TARGET_EFAULT;
2144             }
2145             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2146             break;
2147         case IP_ADD_MEMBERSHIP:
2148         case IP_DROP_MEMBERSHIP:
2149             if (optlen < sizeof (struct target_ip_mreq) ||
2150                 optlen > sizeof (struct target_ip_mreqn))
2151                 return -TARGET_EINVAL;
2152 
2153             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2154             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2155             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2156             break;
2157 
2158         case IP_BLOCK_SOURCE:
2159         case IP_UNBLOCK_SOURCE:
2160         case IP_ADD_SOURCE_MEMBERSHIP:
2161         case IP_DROP_SOURCE_MEMBERSHIP:
2162             if (optlen != sizeof (struct target_ip_mreq_source))
2163                 return -TARGET_EINVAL;
2164 
2165             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2166             if (!ip_mreq_source) {
2167                 return -TARGET_EFAULT;
2168             }
2169             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2170             unlock_user(ip_mreq_source, optval_addr, 0);
2171             break;
2172 
2173         default:
2174             goto unimplemented;
2175         }
2176         break;
2177     case SOL_IPV6:
2178         switch (optname) {
2179         case IPV6_MTU_DISCOVER:
2180         case IPV6_MTU:
2181         case IPV6_V6ONLY:
2182         case IPV6_RECVPKTINFO:
2183         case IPV6_UNICAST_HOPS:
2184         case IPV6_MULTICAST_HOPS:
2185         case IPV6_MULTICAST_LOOP:
2186         case IPV6_RECVERR:
2187         case IPV6_RECVHOPLIMIT:
2188         case IPV6_2292HOPLIMIT:
2189         case IPV6_CHECKSUM:
2190         case IPV6_ADDRFORM:
2191         case IPV6_2292PKTINFO:
2192         case IPV6_RECVTCLASS:
2193         case IPV6_RECVRTHDR:
2194         case IPV6_2292RTHDR:
2195         case IPV6_RECVHOPOPTS:
2196         case IPV6_2292HOPOPTS:
2197         case IPV6_RECVDSTOPTS:
2198         case IPV6_2292DSTOPTS:
2199         case IPV6_TCLASS:
2200         case IPV6_ADDR_PREFERENCES:
2201 #ifdef IPV6_RECVPATHMTU
2202         case IPV6_RECVPATHMTU:
2203 #endif
2204 #ifdef IPV6_TRANSPARENT
2205         case IPV6_TRANSPARENT:
2206 #endif
2207 #ifdef IPV6_FREEBIND
2208         case IPV6_FREEBIND:
2209 #endif
2210 #ifdef IPV6_RECVORIGDSTADDR
2211         case IPV6_RECVORIGDSTADDR:
2212 #endif
2213             val = 0;
2214             if (optlen < sizeof(uint32_t)) {
2215                 return -TARGET_EINVAL;
2216             }
2217             if (get_user_u32(val, optval_addr)) {
2218                 return -TARGET_EFAULT;
2219             }
2220             ret = get_errno(setsockopt(sockfd, level, optname,
2221                                        &val, sizeof(val)));
2222             break;
2223         case IPV6_PKTINFO:
2224         {
2225             struct in6_pktinfo pki;
2226 
2227             if (optlen < sizeof(pki)) {
2228                 return -TARGET_EINVAL;
2229             }
2230 
2231             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &pki, sizeof(pki)));
2239             break;
2240         }
2241         case IPV6_ADD_MEMBERSHIP:
2242         case IPV6_DROP_MEMBERSHIP:
2243         {
2244             struct ipv6_mreq ipv6mreq;
2245 
2246             if (optlen < sizeof(ipv6mreq)) {
2247                 return -TARGET_EINVAL;
2248             }
2249 
2250             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2251                 return -TARGET_EFAULT;
2252             }
2253 
2254             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2255 
2256             ret = get_errno(setsockopt(sockfd, level, optname,
2257                                        &ipv6mreq, sizeof(ipv6mreq)));
2258             break;
2259         }
2260         default:
2261             goto unimplemented;
2262         }
2263         break;
2264     case SOL_ICMPV6:
2265         switch (optname) {
2266         case ICMPV6_FILTER:
2267         {
2268             struct icmp6_filter icmp6f;
2269 
2270             if (optlen > sizeof(icmp6f)) {
2271                 optlen = sizeof(icmp6f);
2272             }
2273 
2274             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2275                 return -TARGET_EFAULT;
2276             }
2277 
2278             for (val = 0; val < 8; val++) {
2279                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2280             }
2281 
2282             ret = get_errno(setsockopt(sockfd, level, optname,
2283                                        &icmp6f, optlen));
2284             break;
2285         }
2286         default:
2287             goto unimplemented;
2288         }
2289         break;
2290     case SOL_RAW:
2291         switch (optname) {
2292         case ICMP_FILTER:
2293         case IPV6_CHECKSUM:
2294             /* these take a u32 value */
2295             if (optlen < sizeof(uint32_t)) {
2296                 return -TARGET_EINVAL;
2297             }
2298 
2299             if (get_user_u32(val, optval_addr)) {
2300                 return -TARGET_EFAULT;
2301             }
2302             ret = get_errno(setsockopt(sockfd, level, optname,
2303                                        &val, sizeof(val)));
2304             break;
2305 
2306         default:
2307             goto unimplemented;
2308         }
2309         break;
2310 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2311     case SOL_ALG:
2312         switch (optname) {
2313         case ALG_SET_KEY:
2314         {
2315             char *alg_key = g_malloc(optlen);
2316 
2317             if (!alg_key) {
2318                 return -TARGET_ENOMEM;
2319             }
2320             if (copy_from_user(alg_key, optval_addr, optlen)) {
2321                 g_free(alg_key);
2322                 return -TARGET_EFAULT;
2323             }
2324             ret = get_errno(setsockopt(sockfd, level, optname,
2325                                        alg_key, optlen));
2326             g_free(alg_key);
2327             break;
2328         }
2329         case ALG_SET_AEAD_AUTHSIZE:
2330         {
2331             ret = get_errno(setsockopt(sockfd, level, optname,
2332                                        NULL, optlen));
2333             break;
2334         }
2335         default:
2336             goto unimplemented;
2337         }
2338         break;
2339 #endif
2340     case TARGET_SOL_SOCKET:
2341         switch (optname) {
2342         case TARGET_SO_RCVTIMEO:
2343         {
2344                 struct timeval tv;
2345 
2346                 optname = SO_RCVTIMEO;
2347 
2348 set_timeout:
2349                 if (optlen != sizeof(struct target_timeval)) {
2350                     return -TARGET_EINVAL;
2351                 }
2352 
2353                 if (copy_from_user_timeval(&tv, optval_addr)) {
2354                     return -TARGET_EFAULT;
2355                 }
2356 
2357                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2358                                 &tv, sizeof(tv)));
2359                 return ret;
2360         }
2361         case TARGET_SO_SNDTIMEO:
2362                 optname = SO_SNDTIMEO;
2363                 goto set_timeout;
2364         case TARGET_SO_ATTACH_FILTER:
2365         {
2366                 struct target_sock_fprog *tfprog;
2367                 struct target_sock_filter *tfilter;
2368                 struct sock_fprog fprog;
2369                 struct sock_filter *filter;
2370                 int i;
2371 
2372                 if (optlen != sizeof(*tfprog)) {
2373                     return -TARGET_EINVAL;
2374                 }
2375                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2376                     return -TARGET_EFAULT;
2377                 }
2378                 if (!lock_user_struct(VERIFY_READ, tfilter,
2379                                       tswapal(tfprog->filter), 0)) {
2380                     unlock_user_struct(tfprog, optval_addr, 1);
2381                     return -TARGET_EFAULT;
2382                 }
2383 
2384                 fprog.len = tswap16(tfprog->len);
2385                 filter = g_try_new(struct sock_filter, fprog.len);
2386                 if (filter == NULL) {
2387                     unlock_user_struct(tfilter, tfprog->filter, 1);
2388                     unlock_user_struct(tfprog, optval_addr, 1);
2389                     return -TARGET_ENOMEM;
2390                 }
2391                 for (i = 0; i < fprog.len; i++) {
2392                     filter[i].code = tswap16(tfilter[i].code);
2393                     filter[i].jt = tfilter[i].jt;
2394                     filter[i].jf = tfilter[i].jf;
2395                     filter[i].k = tswap32(tfilter[i].k);
2396                 }
2397                 fprog.filter = filter;
2398 
2399                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2400                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2401                 g_free(filter);
2402 
2403                 unlock_user_struct(tfilter, tfprog->filter, 1);
2404                 unlock_user_struct(tfprog, optval_addr, 1);
2405                 return ret;
2406         }
2407         case TARGET_SO_BINDTODEVICE:
2408         {
2409                 char *dev_ifname, *addr_ifname;
2410 
2411                 if (optlen > IFNAMSIZ - 1) {
2412                     optlen = IFNAMSIZ - 1;
2413                 }
2414                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2415                 if (!dev_ifname) {
2416                     return -TARGET_EFAULT;
2417                 }
2418                 optname = SO_BINDTODEVICE;
2419                 addr_ifname = alloca(IFNAMSIZ);
2420                 memcpy(addr_ifname, dev_ifname, optlen);
2421                 addr_ifname[optlen] = 0;
2422                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2423                                            addr_ifname, optlen));
2424                 unlock_user(dev_ifname, optval_addr, 0);
2425                 return ret;
2426         }
2427         case TARGET_SO_LINGER:
2428         {
2429                 struct linger lg;
2430                 struct target_linger *tlg;
2431 
2432                 if (optlen != sizeof(struct target_linger)) {
2433                     return -TARGET_EINVAL;
2434                 }
2435                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2436                     return -TARGET_EFAULT;
2437                 }
2438                 __get_user(lg.l_onoff, &tlg->l_onoff);
2439                 __get_user(lg.l_linger, &tlg->l_linger);
2440                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2441                                 &lg, sizeof(lg)));
2442                 unlock_user_struct(tlg, optval_addr, 0);
2443                 return ret;
2444         }
2445         /* Options with 'int' argument.  */
2446         case TARGET_SO_DEBUG:
2447                 optname = SO_DEBUG;
2448                 break;
2449         case TARGET_SO_REUSEADDR:
2450                 optname = SO_REUSEADDR;
2451                 break;
2452 #ifdef SO_REUSEPORT
2453         case TARGET_SO_REUSEPORT:
2454                 optname = SO_REUSEPORT;
2455                 break;
2456 #endif
2457         case TARGET_SO_TYPE:
2458                 optname = SO_TYPE;
2459                 break;
2460         case TARGET_SO_ERROR:
2461                 optname = SO_ERROR;
2462                 break;
2463         case TARGET_SO_DONTROUTE:
2464                 optname = SO_DONTROUTE;
2465                 break;
2466         case TARGET_SO_BROADCAST:
2467                 optname = SO_BROADCAST;
2468                 break;
2469         case TARGET_SO_SNDBUF:
2470                 optname = SO_SNDBUF;
2471                 break;
2472         case TARGET_SO_SNDBUFFORCE:
2473                 optname = SO_SNDBUFFORCE;
2474                 break;
2475         case TARGET_SO_RCVBUF:
2476                 optname = SO_RCVBUF;
2477                 break;
2478         case TARGET_SO_RCVBUFFORCE:
2479                 optname = SO_RCVBUFFORCE;
2480                 break;
2481         case TARGET_SO_KEEPALIVE:
2482                 optname = SO_KEEPALIVE;
2483                 break;
2484         case TARGET_SO_OOBINLINE:
2485                 optname = SO_OOBINLINE;
2486                 break;
2487         case TARGET_SO_NO_CHECK:
2488                 optname = SO_NO_CHECK;
2489                 break;
2490         case TARGET_SO_PRIORITY:
2491                 optname = SO_PRIORITY;
2492                 break;
2493 #ifdef SO_BSDCOMPAT
2494         case TARGET_SO_BSDCOMPAT:
2495                 optname = SO_BSDCOMPAT;
2496                 break;
2497 #endif
2498         case TARGET_SO_PASSCRED:
2499                 optname = SO_PASSCRED;
2500                 break;
2501         case TARGET_SO_PASSSEC:
2502                 optname = SO_PASSSEC;
2503                 break;
2504         case TARGET_SO_TIMESTAMP:
2505                 optname = SO_TIMESTAMP;
2506                 break;
2507         case TARGET_SO_RCVLOWAT:
2508                 optname = SO_RCVLOWAT;
2509                 break;
2510         default:
2511             goto unimplemented;
2512         }
2513         if (optlen < sizeof(uint32_t))
2514             return -TARGET_EINVAL;
2515 
2516         if (get_user_u32(val, optval_addr))
2517             return -TARGET_EFAULT;
2518         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2519         break;
2520 #ifdef SOL_NETLINK
2521     case SOL_NETLINK:
2522         switch (optname) {
2523         case NETLINK_PKTINFO:
2524         case NETLINK_ADD_MEMBERSHIP:
2525         case NETLINK_DROP_MEMBERSHIP:
2526         case NETLINK_BROADCAST_ERROR:
2527         case NETLINK_NO_ENOBUFS:
2528 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2529         case NETLINK_LISTEN_ALL_NSID:
2530         case NETLINK_CAP_ACK:
2531 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2532 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2533         case NETLINK_EXT_ACK:
2534 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2536         case NETLINK_GET_STRICT_CHK:
2537 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2538             break;
2539         default:
2540             goto unimplemented;
2541         }
2542         val = 0;
2543         if (optlen < sizeof(uint32_t)) {
2544             return -TARGET_EINVAL;
2545         }
2546         if (get_user_u32(val, optval_addr)) {
2547             return -TARGET_EFAULT;
2548         }
2549         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2550                                    sizeof(val)));
2551         break;
2552 #endif /* SOL_NETLINK */
2553     default:
2554     unimplemented:
2555         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2556                       level, optname);
2557         ret = -TARGET_ENOPROTOOPT;
2558     }
2559     return ret;
2560 }
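
/*
 * Illustrative sketch, not part of the original file: the
 * TARGET_SO_ATTACH_FILTER case above rebuilds a struct sock_fprog from the
 * guest's byte order.  The guest-side call being emulated attaches a classic
 * BPF program; the single-instruction program below ("ret #0") drops every
 * packet (function name invented for the example):
 */
static G_GNUC_UNUSED int example_attach_drop_all_filter(int sock)
{
    struct sock_filter insns[] = {
        { .code = BPF_RET | BPF_K, .jt = 0, .jf = 0, .k = 0 },
    };
    struct sock_fprog prog = {
        .len = ARRAY_SIZE(insns),
        .filter = insns,
    };

    return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
                      &prog, sizeof(prog));
}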
2561 
2562 /* do_getsockopt() must return target values and target errnos. */
2563 static abi_long do_getsockopt(int sockfd, int level, int optname,
2564                               abi_ulong optval_addr, abi_ulong optlen)
2565 {
2566     abi_long ret;
2567     int len, val;
2568     socklen_t lv;
2569 
2570     switch(level) {
2571     case TARGET_SOL_SOCKET:
2572         level = SOL_SOCKET;
2573         switch (optname) {
2574         /* These don't just return a single integer */
2575         case TARGET_SO_PEERNAME:
2576             goto unimplemented;
2577         case TARGET_SO_RCVTIMEO: {
2578             struct timeval tv;
2579             socklen_t tvlen;
2580 
2581             optname = SO_RCVTIMEO;
2582 
2583 get_timeout:
2584             if (get_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             if (len < 0) {
2588                 return -TARGET_EINVAL;
2589             }
2590 
2591             tvlen = sizeof(tv);
2592             ret = get_errno(getsockopt(sockfd, level, optname,
2593                                        &tv, &tvlen));
2594             if (ret < 0) {
2595                 return ret;
2596             }
2597             if (len > sizeof(struct target_timeval)) {
2598                 len = sizeof(struct target_timeval);
2599             }
2600             if (copy_to_user_timeval(optval_addr, &tv)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (put_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             break;
2607         }
2608         case TARGET_SO_SNDTIMEO:
2609             optname = SO_SNDTIMEO;
2610             goto get_timeout;
2611         case TARGET_SO_PEERCRED: {
2612             struct ucred cr;
2613             socklen_t crlen;
2614             struct target_ucred *tcr;
2615 
2616             if (get_user_u32(len, optlen)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (len < 0) {
2620                 return -TARGET_EINVAL;
2621             }
2622 
2623             crlen = sizeof(cr);
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2625                                        &cr, &crlen));
2626             if (ret < 0) {
2627                 return ret;
2628             }
2629             if (len > crlen) {
2630                 len = crlen;
2631             }
2632             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             __put_user(cr.pid, &tcr->pid);
2636             __put_user(cr.uid, &tcr->uid);
2637             __put_user(cr.gid, &tcr->gid);
2638             unlock_user_struct(tcr, optval_addr, 1);
2639             if (put_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             break;
2643         }
2644         case TARGET_SO_PEERSEC: {
2645             char *name;
2646 
2647             if (get_user_u32(len, optlen)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             if (len < 0) {
2651                 return -TARGET_EINVAL;
2652             }
2653             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2654             if (!name) {
2655                 return -TARGET_EFAULT;
2656             }
2657             lv = len;
2658             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2659                                        name, &lv));
2660             if (put_user_u32(lv, optlen)) {
2661                 ret = -TARGET_EFAULT;
2662             }
2663             unlock_user(name, optval_addr, lv);
2664             break;
2665         }
2666         case TARGET_SO_LINGER:
2667         {
2668             struct linger lg;
2669             socklen_t lglen;
2670             struct target_linger *tlg;
2671 
2672             if (get_user_u32(len, optlen)) {
2673                 return -TARGET_EFAULT;
2674             }
2675             if (len < 0) {
2676                 return -TARGET_EINVAL;
2677             }
2678 
2679             lglen = sizeof(lg);
2680             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2681                                        &lg, &lglen));
2682             if (ret < 0) {
2683                 return ret;
2684             }
2685             if (len > lglen) {
2686                 len = lglen;
2687             }
2688             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             __put_user(lg.l_onoff, &tlg->l_onoff);
2692             __put_user(lg.l_linger, &tlg->l_linger);
2693             unlock_user_struct(tlg, optval_addr, 1);
2694             if (put_user_u32(len, optlen)) {
2695                 return -TARGET_EFAULT;
2696             }
2697             break;
2698         }
2699         /* Options with 'int' argument.  */
2700         case TARGET_SO_DEBUG:
2701             optname = SO_DEBUG;
2702             goto int_case;
2703         case TARGET_SO_REUSEADDR:
2704             optname = SO_REUSEADDR;
2705             goto int_case;
2706 #ifdef SO_REUSEPORT
2707         case TARGET_SO_REUSEPORT:
2708             optname = SO_REUSEPORT;
2709             goto int_case;
2710 #endif
2711         case TARGET_SO_TYPE:
2712             optname = SO_TYPE;
2713             goto int_case;
2714         case TARGET_SO_ERROR:
2715             optname = SO_ERROR;
2716             goto int_case;
2717         case TARGET_SO_DONTROUTE:
2718             optname = SO_DONTROUTE;
2719             goto int_case;
2720         case TARGET_SO_BROADCAST:
2721             optname = SO_BROADCAST;
2722             goto int_case;
2723         case TARGET_SO_SNDBUF:
2724             optname = SO_SNDBUF;
2725             goto int_case;
2726         case TARGET_SO_RCVBUF:
2727             optname = SO_RCVBUF;
2728             goto int_case;
2729         case TARGET_SO_KEEPALIVE:
2730             optname = SO_KEEPALIVE;
2731             goto int_case;
2732         case TARGET_SO_OOBINLINE:
2733             optname = SO_OOBINLINE;
2734             goto int_case;
2735         case TARGET_SO_NO_CHECK:
2736             optname = SO_NO_CHECK;
2737             goto int_case;
2738         case TARGET_SO_PRIORITY:
2739             optname = SO_PRIORITY;
2740             goto int_case;
2741 #ifdef SO_BSDCOMPAT
2742         case TARGET_SO_BSDCOMPAT:
2743             optname = SO_BSDCOMPAT;
2744             goto int_case;
2745 #endif
2746         case TARGET_SO_PASSCRED:
2747             optname = SO_PASSCRED;
2748             goto int_case;
2749         case TARGET_SO_TIMESTAMP:
2750             optname = SO_TIMESTAMP;
2751             goto int_case;
2752         case TARGET_SO_RCVLOWAT:
2753             optname = SO_RCVLOWAT;
2754             goto int_case;
2755         case TARGET_SO_ACCEPTCONN:
2756             optname = SO_ACCEPTCONN;
2757             goto int_case;
2758         case TARGET_SO_PROTOCOL:
2759             optname = SO_PROTOCOL;
2760             goto int_case;
2761         case TARGET_SO_DOMAIN:
2762             optname = SO_DOMAIN;
2763             goto int_case;
2764         default:
2765             goto int_case;
2766         }
2767         break;
2768     case SOL_TCP:
2769     case SOL_UDP:
2770         /* TCP and UDP options all take an 'int' value.  */
2771     int_case:
2772         if (get_user_u32(len, optlen))
2773             return -TARGET_EFAULT;
2774         if (len < 0)
2775             return -TARGET_EINVAL;
2776         lv = sizeof(lv);
2777         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2778         if (ret < 0)
2779             return ret;
2780         switch (optname) {
2781         case SO_TYPE:
2782             val = host_to_target_sock_type(val);
2783             break;
2784         case SO_ERROR:
2785             val = host_to_target_errno(val);
2786             break;
2787         }
2788         if (len > lv)
2789             len = lv;
2790         if (len == 4) {
2791             if (put_user_u32(val, optval_addr))
2792                 return -TARGET_EFAULT;
2793         } else {
2794             if (put_user_u8(val, optval_addr))
2795                 return -TARGET_EFAULT;
2796         }
2797         if (put_user_u32(len, optlen))
2798             return -TARGET_EFAULT;
2799         break;
2800     case SOL_IP:
2801         switch(optname) {
2802         case IP_TOS:
2803         case IP_TTL:
2804         case IP_HDRINCL:
2805         case IP_ROUTER_ALERT:
2806         case IP_RECVOPTS:
2807         case IP_RETOPTS:
2808         case IP_PKTINFO:
2809         case IP_MTU_DISCOVER:
2810         case IP_RECVERR:
2811         case IP_RECVTOS:
2812 #ifdef IP_FREEBIND
2813         case IP_FREEBIND:
2814 #endif
2815         case IP_MULTICAST_TTL:
2816         case IP_MULTICAST_LOOP:
2817             if (get_user_u32(len, optlen))
2818                 return -TARGET_EFAULT;
2819             if (len < 0)
2820                 return -TARGET_EINVAL;
2821             lv = sizeof(lv);
2822             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2823             if (ret < 0)
2824                 return ret;
2825             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2826                 len = 1;
2827                 if (put_user_u32(len, optlen)
2828                     || put_user_u8(val, optval_addr))
2829                     return -TARGET_EFAULT;
2830             } else {
2831                 if (len > sizeof(int))
2832                     len = sizeof(int);
2833                 if (put_user_u32(len, optlen)
2834                     || put_user_u32(val, optval_addr))
2835                     return -TARGET_EFAULT;
2836             }
2837             break;
2838         default:
2839             ret = -TARGET_ENOPROTOOPT;
2840             break;
2841         }
2842         break;
2843     case SOL_IPV6:
2844         switch (optname) {
2845         case IPV6_MTU_DISCOVER:
2846         case IPV6_MTU:
2847         case IPV6_V6ONLY:
2848         case IPV6_RECVPKTINFO:
2849         case IPV6_UNICAST_HOPS:
2850         case IPV6_MULTICAST_HOPS:
2851         case IPV6_MULTICAST_LOOP:
2852         case IPV6_RECVERR:
2853         case IPV6_RECVHOPLIMIT:
2854         case IPV6_2292HOPLIMIT:
2855         case IPV6_CHECKSUM:
2856         case IPV6_ADDRFORM:
2857         case IPV6_2292PKTINFO:
2858         case IPV6_RECVTCLASS:
2859         case IPV6_RECVRTHDR:
2860         case IPV6_2292RTHDR:
2861         case IPV6_RECVHOPOPTS:
2862         case IPV6_2292HOPOPTS:
2863         case IPV6_RECVDSTOPTS:
2864         case IPV6_2292DSTOPTS:
2865         case IPV6_TCLASS:
2866         case IPV6_ADDR_PREFERENCES:
2867 #ifdef IPV6_RECVPATHMTU
2868         case IPV6_RECVPATHMTU:
2869 #endif
2870 #ifdef IPV6_TRANSPARENT
2871         case IPV6_TRANSPARENT:
2872 #endif
2873 #ifdef IPV6_FREEBIND
2874         case IPV6_FREEBIND:
2875 #endif
2876 #ifdef IPV6_RECVORIGDSTADDR
2877         case IPV6_RECVORIGDSTADDR:
2878 #endif
2879             if (get_user_u32(len, optlen))
2880                 return -TARGET_EFAULT;
2881             if (len < 0)
2882                 return -TARGET_EINVAL;
2883             lv = sizeof(lv);
2884             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2885             if (ret < 0)
2886                 return ret;
2887             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2888                 len = 1;
2889                 if (put_user_u32(len, optlen)
2890                     || put_user_u8(val, optval_addr))
2891                     return -TARGET_EFAULT;
2892             } else {
2893                 if (len > sizeof(int))
2894                     len = sizeof(int);
2895                 if (put_user_u32(len, optlen)
2896                     || put_user_u32(val, optval_addr))
2897                     return -TARGET_EFAULT;
2898             }
2899             break;
2900         default:
2901             ret = -TARGET_ENOPROTOOPT;
2902             break;
2903         }
2904         break;
2905 #ifdef SOL_NETLINK
2906     case SOL_NETLINK:
2907         switch (optname) {
2908         case NETLINK_PKTINFO:
2909         case NETLINK_BROADCAST_ERROR:
2910         case NETLINK_NO_ENOBUFS:
2911 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2912         case NETLINK_LISTEN_ALL_NSID:
2913         case NETLINK_CAP_ACK:
2914 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2915 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2916         case NETLINK_EXT_ACK:
2917 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2919         case NETLINK_GET_STRICT_CHK:
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2921             if (get_user_u32(len, optlen)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             if (len != sizeof(val)) {
2925                 return -TARGET_EINVAL;
2926             }
2927             lv = len;
2928             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2929             if (ret < 0) {
2930                 return ret;
2931             }
2932             if (put_user_u32(lv, optlen)
2933                 || put_user_u32(val, optval_addr)) {
2934                 return -TARGET_EFAULT;
2935             }
2936             break;
2937 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2938         case NETLINK_LIST_MEMBERSHIPS:
2939         {
2940             uint32_t *results;
2941             int i;
2942             if (get_user_u32(len, optlen)) {
2943                 return -TARGET_EFAULT;
2944             }
2945             if (len < 0) {
2946                 return -TARGET_EINVAL;
2947             }
2948             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2949             if (!results && len > 0) {
2950                 return -TARGET_EFAULT;
2951             }
2952             lv = len;
2953             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2954             if (ret < 0) {
2955                 unlock_user(results, optval_addr, 0);
2956                 return ret;
2957             }
2958             /* swap host endianness to target endianness. */
2959             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2960                 results[i] = tswap32(results[i]);
2961             }
2962             if (put_user_u32(lv, optlen)) {
2963                 return -TARGET_EFAULT;
2964             }
2965             unlock_user(results, optval_addr, 0);
2966             break;
2967         }
2968 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2969         default:
2970             goto unimplemented;
2971         }
2972         break;
2973 #endif /* SOL_NETLINK */
2974     default:
2975     unimplemented:
2976         qemu_log_mask(LOG_UNIMP,
2977                       "getsockopt level=%d optname=%d not yet supported\n",
2978                       level, optname);
2979         ret = -TARGET_EOPNOTSUPP;
2980         break;
2981     }
2982     return ret;
2983 }
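
/*
 * Illustrative sketch, not part of the original file: the SO_ERROR case in
 * the int_case path above (whose value is mapped through
 * host_to_target_errno) services the classic guest idiom of checking the
 * outcome of a non-blocking connect() (function name invented for the
 * example):
 */
static G_GNUC_UNUSED int example_nonblocking_connect_result(int sock)
{
    int err = 0;
    socklen_t errlen = sizeof(err);

    if (getsockopt(sock, SOL_SOCKET, SO_ERROR, &err, &errlen) < 0) {
        return -1;              /* getsockopt itself failed */
    }
    return err;                 /* 0 on success, otherwise the pending errno */
}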
2984 
2985 /* Convert target low/high pair representing file offset into the host
2986  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2987  * as the kernel doesn't handle them either.
2988  */
2989 static void target_to_host_low_high(abi_ulong tlow,
2990                                     abi_ulong thigh,
2991                                     unsigned long *hlow,
2992                                     unsigned long *hhigh)
2993 {
2994     uint64_t off = tlow |
2995         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2996         TARGET_LONG_BITS / 2;
2997 
2998     *hlow = off;
2999     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3000 }
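
/*
 * Illustrative sketch, not part of the original file: the shift by
 * TARGET_LONG_BITS / 2 is applied twice because shifting a value by its full
 * width is undefined behaviour in C; shifting twice by half the width either
 * moves the high word into place (32-bit target) or discards it (64-bit
 * target, where the whole offset already fits in tlow).  Worked example for
 * a 32-bit guest: tlow = 0x89abcdef and thigh = 0x01234567 combine to
 * off = 0x0123456789abcdef; a 64-bit host then gets *hlow = that full value
 * and *hhigh = 0, while a 32-bit host gets the same low/high split back.
 * The helper below (name invented for the example) reassembles the pair on
 * the host side using the same trick:
 */
static G_GNUC_UNUSED uint64_t example_host_low_high_to_u64(abi_ulong tlow,
                                                           abi_ulong thigh)
{
    unsigned long hlow, hhigh;

    target_to_host_low_high(tlow, thigh, &hlow, &hhigh);
    return hlow | ((uint64_t)hhigh << HOST_LONG_BITS / 2 << HOST_LONG_BITS / 2);
}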
3001 
3002 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3003                                 abi_ulong count, int copy)
3004 {
3005     struct target_iovec *target_vec;
3006     struct iovec *vec;
3007     abi_ulong total_len, max_len;
3008     int i;
3009     int err = 0;
3010     bool bad_address = false;
3011 
3012     if (count == 0) {
3013         errno = 0;
3014         return NULL;
3015     }
3016     if (count > IOV_MAX) {
3017         errno = EINVAL;
3018         return NULL;
3019     }
3020 
3021     vec = g_try_new0(struct iovec, count);
3022     if (vec == NULL) {
3023         errno = ENOMEM;
3024         return NULL;
3025     }
3026 
3027     target_vec = lock_user(VERIFY_READ, target_addr,
3028                            count * sizeof(struct target_iovec), 1);
3029     if (target_vec == NULL) {
3030         err = EFAULT;
3031         goto fail2;
3032     }
3033 
3034     /* ??? If host page size > target page size, this will result in a
3035        value larger than what we can actually support.  */
3036     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3037     total_len = 0;
3038 
3039     for (i = 0; i < count; i++) {
3040         abi_ulong base = tswapal(target_vec[i].iov_base);
3041         abi_long len = tswapal(target_vec[i].iov_len);
3042 
3043         if (len < 0) {
3044             err = EINVAL;
3045             goto fail;
3046         } else if (len == 0) {
3047             /* Zero length pointer is ignored.  */
3048             vec[i].iov_base = 0;
3049         } else {
3050             vec[i].iov_base = lock_user(type, base, len, copy);
3051             /* If the first buffer pointer is bad, this is a fault.  But
3052              * subsequent bad buffers will result in a partial write; this
3053              * is realized by filling the vector with null pointers and
3054              * zero lengths. */
3055             if (!vec[i].iov_base) {
3056                 if (i == 0) {
3057                     err = EFAULT;
3058                     goto fail;
3059                 } else {
3060                     bad_address = true;
3061                 }
3062             }
3063             if (bad_address) {
3064                 len = 0;
3065             }
3066             if (len > max_len - total_len) {
3067                 len = max_len - total_len;
3068             }
3069         }
3070         vec[i].iov_len = len;
3071         total_len += len;
3072     }
3073 
3074     unlock_user(target_vec, target_addr, 0);
3075     return vec;
3076 
3077  fail:
3078     while (--i >= 0) {
3079         if (tswapal(target_vec[i].iov_len) > 0) {
3080             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3081         }
3082     }
3083     unlock_user(target_vec, target_addr, 0);
3084  fail2:
3085     g_free(vec);
3086     errno = err;
3087     return NULL;
3088 }
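
/*
 * Illustrative example of the partial-transfer behaviour above: if the
 * guest passes three iovec entries and only the second points at an
 * unmapped address, entry 0 is kept, the faulting entry gets a NULL base,
 * and it and every subsequent entry are clamped to iov_len = 0, so the
 * following readv/writev performs a short transfer.  Only a fault on the
 * very first entry makes lock_iovec() fail with EFAULT.
 */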
3089 
3090 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3091                          abi_ulong count, int copy)
3092 {
3093     struct target_iovec *target_vec;
3094     int i;
3095 
3096     target_vec = lock_user(VERIFY_READ, target_addr,
3097                            count * sizeof(struct target_iovec), 1);
3098     if (target_vec) {
3099         for (i = 0; i < count; i++) {
3100             abi_ulong base = tswapal(target_vec[i].iov_base);
3101             abi_long len = tswapal(target_vec[i].iov_len);
3102             if (len < 0) {
3103                 break;
3104             }
3105             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3106         }
3107         unlock_user(target_vec, target_addr, 0);
3108     }
3109 
3110     g_free(vec);
3111 }
3112 
3113 static inline int target_to_host_sock_type(int *type)
3114 {
3115     int host_type = 0;
3116     int target_type = *type;
3117 
3118     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3119     case TARGET_SOCK_DGRAM:
3120         host_type = SOCK_DGRAM;
3121         break;
3122     case TARGET_SOCK_STREAM:
3123         host_type = SOCK_STREAM;
3124         break;
3125     default:
3126         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3127         break;
3128     }
3129     if (target_type & TARGET_SOCK_CLOEXEC) {
3130 #if defined(SOCK_CLOEXEC)
3131         host_type |= SOCK_CLOEXEC;
3132 #else
3133         return -TARGET_EINVAL;
3134 #endif
3135     }
3136     if (target_type & TARGET_SOCK_NONBLOCK) {
3137 #if defined(SOCK_NONBLOCK)
3138         host_type |= SOCK_NONBLOCK;
3139 #elif !defined(O_NONBLOCK)
3140         return -TARGET_EINVAL;
3141 #endif
3142     }
3143     *type = host_type;
3144     return 0;
3145 }
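
/*
 * Illustrative example (not part of the original source): a guest type of
 * TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC maps to
 * SOCK_STREAM | SOCK_CLOEXEC on hosts that define SOCK_CLOEXEC.  When the
 * guest asks for TARGET_SOCK_NONBLOCK and the host lacks SOCK_NONBLOCK
 * but has O_NONBLOCK, the flag is deliberately left out here and emulated
 * afterwards by sock_flags_fixup() below.
 */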
3146 
3147 /* Try to emulate socket type flags after socket creation.  */
3148 static int sock_flags_fixup(int fd, int target_type)
3149 {
3150 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3151     if (target_type & TARGET_SOCK_NONBLOCK) {
3152         int flags = fcntl(fd, F_GETFL);
3153         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3154             close(fd);
3155             return -TARGET_EINVAL;
3156         }
3157     }
3158 #endif
3159     return fd;
3160 }
3161 
3162 /* do_socket() must return target values and target errnos. */
3163 static abi_long do_socket(int domain, int type, int protocol)
3164 {
3165     int target_type = type;
3166     int ret;
3167 
3168     ret = target_to_host_sock_type(&type);
3169     if (ret) {
3170         return ret;
3171     }
3172 
3173     if (domain == PF_NETLINK && !(
3174 #ifdef CONFIG_RTNETLINK
3175          protocol == NETLINK_ROUTE ||
3176 #endif
3177          protocol == NETLINK_KOBJECT_UEVENT ||
3178          protocol == NETLINK_AUDIT)) {
3179         return -TARGET_EPROTONOSUPPORT;
3180     }
3181 
3182     if (domain == AF_PACKET ||
3183         (domain == AF_INET && type == SOCK_PACKET)) {
3184         protocol = tswap16(protocol);
3185     }
3186 
3187     ret = get_errno(socket(domain, type, protocol));
3188     if (ret >= 0) {
3189         ret = sock_flags_fixup(ret, target_type);
3190         if (type == SOCK_PACKET) {
3191             /* Handle an obsolete case: if the socket type is
3192              * SOCK_PACKET, the socket is bound by device name.
3193              */
3194             fd_trans_register(ret, &target_packet_trans);
3195         } else if (domain == PF_NETLINK) {
3196             switch (protocol) {
3197 #ifdef CONFIG_RTNETLINK
3198             case NETLINK_ROUTE:
3199                 fd_trans_register(ret, &target_netlink_route_trans);
3200                 break;
3201 #endif
3202             case NETLINK_KOBJECT_UEVENT:
3203                 /* nothing to do: messages are strings */
3204                 break;
3205             case NETLINK_AUDIT:
3206                 fd_trans_register(ret, &target_netlink_audit_trans);
3207                 break;
3208             default:
3209                 g_assert_not_reached();
3210             }
3211         }
3212     }
3213     return ret;
3214 }
3215 
3216 /* do_bind() must return target values and target errnos. */
3217 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3218                         socklen_t addrlen)
3219 {
3220     void *addr;
3221     abi_long ret;
3222 
3223     if ((int)addrlen < 0) {
3224         return -TARGET_EINVAL;
3225     }
3226 
3227     addr = alloca(addrlen+1);
3228 
3229     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3230     if (ret)
3231         return ret;
3232 
3233     return get_errno(bind(sockfd, addr, addrlen));
3234 }
3235 
3236 /* do_connect() must return target values and target errnos. */
3237 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3238                            socklen_t addrlen)
3239 {
3240     void *addr;
3241     abi_long ret;
3242 
3243     if ((int)addrlen < 0) {
3244         return -TARGET_EINVAL;
3245     }
3246 
3247     addr = alloca(addrlen+1);
3248 
3249     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3250     if (ret)
3251         return ret;
3252 
3253     return get_errno(safe_connect(sockfd, addr, addrlen));
3254 }
3255 
3256 /* do_sendrecvmsg_locked() must return target values and target errnos. */
3257 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3258                                       int flags, int send)
3259 {
3260     abi_long ret, len;
3261     struct msghdr msg;
3262     abi_ulong count;
3263     struct iovec *vec;
3264     abi_ulong target_vec;
3265 
3266     if (msgp->msg_name) {
3267         msg.msg_namelen = tswap32(msgp->msg_namelen);
3268         msg.msg_name = alloca(msg.msg_namelen+1);
3269         ret = target_to_host_sockaddr(fd, msg.msg_name,
3270                                       tswapal(msgp->msg_name),
3271                                       msg.msg_namelen);
3272         if (ret == -TARGET_EFAULT) {
3273             /* For connected sockets msg_name and msg_namelen must
3274              * be ignored, so returning EFAULT immediately is wrong.
3275              * Instead, pass a bad msg_name to the host kernel, and
3276              * let it decide whether to return EFAULT or not.
3277              */
3278             msg.msg_name = (void *)-1;
3279         } else if (ret) {
3280             goto out2;
3281         }
3282     } else {
3283         msg.msg_name = NULL;
3284         msg.msg_namelen = 0;
3285     }
3286     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3287     msg.msg_control = alloca(msg.msg_controllen);
3288     memset(msg.msg_control, 0, msg.msg_controllen);
3289 
3290     msg.msg_flags = tswap32(msgp->msg_flags);
3291 
3292     count = tswapal(msgp->msg_iovlen);
3293     target_vec = tswapal(msgp->msg_iov);
3294 
3295     if (count > IOV_MAX) {
3296         /* sendmsg/recvmsg return a different errno for this condition than
3297          * readv/writev, so we must catch it here before lock_iovec() does.
3298          */
3299         ret = -TARGET_EMSGSIZE;
3300         goto out2;
3301     }
3302 
3303     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3304                      target_vec, count, send);
3305     if (vec == NULL) {
3306         ret = -host_to_target_errno(errno);
3307         /* allow sending a packet without any iov, e.g. with the MSG_MORE flag */
3308         if (!send || ret) {
3309             goto out2;
3310         }
3311     }
3312     msg.msg_iovlen = count;
3313     msg.msg_iov = vec;
3314 
3315     if (send) {
3316         if (fd_trans_target_to_host_data(fd)) {
3317             void *host_msg;
3318 
3319             host_msg = g_malloc(msg.msg_iov->iov_len);
3320             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3321             ret = fd_trans_target_to_host_data(fd)(host_msg,
3322                                                    msg.msg_iov->iov_len);
3323             if (ret >= 0) {
3324                 msg.msg_iov->iov_base = host_msg;
3325                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3326             }
3327             g_free(host_msg);
3328         } else {
3329             ret = target_to_host_cmsg(&msg, msgp);
3330             if (ret == 0) {
3331                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3332             }
3333         }
3334     } else {
3335         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3336         if (!is_error(ret)) {
3337             len = ret;
3338             if (fd_trans_host_to_target_data(fd)) {
3339                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3340                                                MIN(msg.msg_iov->iov_len, len));
3341             }
3342             if (!is_error(ret)) {
3343                 ret = host_to_target_cmsg(msgp, &msg);
3344             }
3345             if (!is_error(ret)) {
3346                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3347                 msgp->msg_flags = tswap32(msg.msg_flags);
3348                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3349                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3350                                     msg.msg_name, msg.msg_namelen);
3351                     if (ret) {
3352                         goto out;
3353                     }
3354                 }
3355 
3356                 ret = len;
3357             }
3358         }
3359     }
3360 
3361 out:
3362     if (vec) {
3363         unlock_iovec(vec, target_vec, count, !send);
3364     }
3365 out2:
3366     return ret;
3367 }
3368 
3369 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3370                                int flags, int send)
3371 {
3372     abi_long ret;
3373     struct target_msghdr *msgp;
3374 
3375     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3376                           msgp,
3377                           target_msg,
3378                           send ? 1 : 0)) {
3379         return -TARGET_EFAULT;
3380     }
3381     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3382     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3383     return ret;
3384 }
3385 
3386 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3387  * so it might not have this *mmsg-specific flag either.
3388  */
3389 #ifndef MSG_WAITFORONE
3390 #define MSG_WAITFORONE 0x10000
3391 #endif
3392 
3393 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3394                                 unsigned int vlen, unsigned int flags,
3395                                 int send)
3396 {
3397     struct target_mmsghdr *mmsgp;
3398     abi_long ret = 0;
3399     int i;
3400 
3401     if (vlen > UIO_MAXIOV) {
3402         vlen = UIO_MAXIOV;
3403     }
3404 
3405     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3406     if (!mmsgp) {
3407         return -TARGET_EFAULT;
3408     }
3409 
3410     for (i = 0; i < vlen; i++) {
3411         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3412         if (is_error(ret)) {
3413             break;
3414         }
3415         mmsgp[i].msg_len = tswap32(ret);
3416         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3417         if (flags & MSG_WAITFORONE) {
3418             flags |= MSG_DONTWAIT;
3419         }
3420     }
3421 
3422     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3423 
3424     /* Return the number of datagrams sent or received if we transferred
3425      * any at all; otherwise return the error.
3426      */
3427     if (i) {
3428         return i;
3429     }
3430     return ret;
3431 }
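
/*
 * Note added for clarity: with MSG_WAITFORONE only the first call in the
 * loop above may block; once one datagram has been transferred,
 * MSG_DONTWAIT is ORed in, so the remaining slots are filled only from
 * data that is already queued.  The function returns the number of
 * messages transferred, mirroring the kernel's recvmmsg()/sendmmsg()
 * semantics.
 */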
3432 
3433 /* do_accept4() must return target values and target errnos. */
3434 static abi_long do_accept4(int fd, abi_ulong target_addr,
3435                            abi_ulong target_addrlen_addr, int flags)
3436 {
3437     socklen_t addrlen, ret_addrlen;
3438     void *addr;
3439     abi_long ret;
3440     int host_flags;
3441 
3442     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3443 
3444     if (target_addr == 0) {
3445         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3446     }
3447 
3448     /* Linux returns EFAULT if the addrlen pointer is invalid */
3449     if (get_user_u32(addrlen, target_addrlen_addr))
3450         return -TARGET_EFAULT;
3451 
3452     if ((int)addrlen < 0) {
3453         return -TARGET_EINVAL;
3454     }
3455 
3456     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3457         return -TARGET_EFAULT;
3458     }
3459 
3460     addr = alloca(addrlen);
3461 
3462     ret_addrlen = addrlen;
3463     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3464     if (!is_error(ret)) {
3465         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3466         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3467             ret = -TARGET_EFAULT;
3468         }
3469     }
3470     return ret;
3471 }
3472 
3473 /* do_getpeername() must return target values and target errnos. */
3474 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3475                                abi_ulong target_addrlen_addr)
3476 {
3477     socklen_t addrlen, ret_addrlen;
3478     void *addr;
3479     abi_long ret;
3480 
3481     if (get_user_u32(addrlen, target_addrlen_addr))
3482         return -TARGET_EFAULT;
3483 
3484     if ((int)addrlen < 0) {
3485         return -TARGET_EINVAL;
3486     }
3487 
3488     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3489         return -TARGET_EFAULT;
3490     }
3491 
3492     addr = alloca(addrlen);
3493 
3494     ret_addrlen = addrlen;
3495     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3496     if (!is_error(ret)) {
3497         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3498         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3499             ret = -TARGET_EFAULT;
3500         }
3501     }
3502     return ret;
3503 }
3504 
3505 /* do_getsockname() must return target values and target errnos. */
3506 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3507                                abi_ulong target_addrlen_addr)
3508 {
3509     socklen_t addrlen, ret_addrlen;
3510     void *addr;
3511     abi_long ret;
3512 
3513     if (get_user_u32(addrlen, target_addrlen_addr))
3514         return -TARGET_EFAULT;
3515 
3516     if ((int)addrlen < 0) {
3517         return -TARGET_EINVAL;
3518     }
3519 
3520     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3521         return -TARGET_EFAULT;
3522     }
3523 
3524     addr = alloca(addrlen);
3525 
3526     ret_addrlen = addrlen;
3527     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3528     if (!is_error(ret)) {
3529         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3530         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3531             ret = -TARGET_EFAULT;
3532         }
3533     }
3534     return ret;
3535 }
3536 
3537 /* do_socketpair() must return target values and target errnos. */
3538 static abi_long do_socketpair(int domain, int type, int protocol,
3539                               abi_ulong target_tab_addr)
3540 {
3541     int tab[2];
3542     abi_long ret;
3543 
3544     target_to_host_sock_type(&type);
3545 
3546     ret = get_errno(socketpair(domain, type, protocol, tab));
3547     if (!is_error(ret)) {
3548         if (put_user_s32(tab[0], target_tab_addr)
3549             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3550             ret = -TARGET_EFAULT;
3551     }
3552     return ret;
3553 }
3554 
3555 /* do_sendto() must return target values and target errnos. */
3556 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3557                           abi_ulong target_addr, socklen_t addrlen)
3558 {
3559     void *addr;
3560     void *host_msg;
3561     void *copy_msg = NULL;
3562     abi_long ret;
3563 
3564     if ((int)addrlen < 0) {
3565         return -TARGET_EINVAL;
3566     }
3567 
3568     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3569     if (!host_msg)
3570         return -TARGET_EFAULT;
3571     if (fd_trans_target_to_host_data(fd)) {
3572         copy_msg = host_msg;
3573         host_msg = g_malloc(len);
3574         memcpy(host_msg, copy_msg, len);
3575         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3576         if (ret < 0) {
3577             goto fail;
3578         }
3579     }
3580     if (target_addr) {
3581         addr = alloca(addrlen+1);
3582         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3583         if (ret) {
3584             goto fail;
3585         }
3586         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3587     } else {
3588         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3589     }
3590 fail:
3591     if (copy_msg) {
3592         g_free(host_msg);
3593         host_msg = copy_msg;
3594     }
3595     unlock_user(host_msg, msg, 0);
3596     return ret;
3597 }
3598 
3599 /* do_recvfrom() must return target values and target errnos. */
3600 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3601                             abi_ulong target_addr,
3602                             abi_ulong target_addrlen)
3603 {
3604     socklen_t addrlen, ret_addrlen;
3605     void *addr;
3606     void *host_msg;
3607     abi_long ret;
3608 
3609     if (!msg) {
3610         host_msg = NULL;
3611     } else {
3612         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3613         if (!host_msg) {
3614             return -TARGET_EFAULT;
3615         }
3616     }
3617     if (target_addr) {
3618         if (get_user_u32(addrlen, target_addrlen)) {
3619             ret = -TARGET_EFAULT;
3620             goto fail;
3621         }
3622         if ((int)addrlen < 0) {
3623             ret = -TARGET_EINVAL;
3624             goto fail;
3625         }
3626         addr = alloca(addrlen);
3627         ret_addrlen = addrlen;
3628         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3629                                       addr, &ret_addrlen));
3630     } else {
3631         addr = NULL; /* To keep compiler quiet.  */
3632         addrlen = 0; /* To keep compiler quiet.  */
3633         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3634     }
3635     if (!is_error(ret)) {
3636         if (fd_trans_host_to_target_data(fd)) {
3637             abi_long trans;
3638             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3639             if (is_error(trans)) {
3640                 ret = trans;
3641                 goto fail;
3642             }
3643         }
3644         if (target_addr) {
3645             host_to_target_sockaddr(target_addr, addr,
3646                                     MIN(addrlen, ret_addrlen));
3647             if (put_user_u32(ret_addrlen, target_addrlen)) {
3648                 ret = -TARGET_EFAULT;
3649                 goto fail;
3650             }
3651         }
3652         unlock_user(host_msg, msg, len);
3653     } else {
3654 fail:
3655         unlock_user(host_msg, msg, 0);
3656     }
3657     return ret;
3658 }
3659 
3660 #ifdef TARGET_NR_socketcall
3661 /* do_socketcall() must return target values and target errnos. */
3662 static abi_long do_socketcall(int num, abi_ulong vptr)
3663 {
3664     static const unsigned nargs[] = { /* number of arguments per operation */
3665         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3666         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3667         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3668         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3669         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3670         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3671         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3672         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3673         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3674         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3675         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3676         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3677         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3678         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3679         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3680         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3681         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3682         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3683         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3684         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3685     };
3686     abi_long a[6]; /* max 6 args */
3687     unsigned i;
3688 
3689     /* check the range of the first argument num */
3690     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3691     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3692         return -TARGET_EINVAL;
3693     }
3694     /* ensure we have space for args */
3695     if (nargs[num] > ARRAY_SIZE(a)) {
3696         return -TARGET_EINVAL;
3697     }
3698     /* collect the arguments in a[] according to nargs[] */
3699     for (i = 0; i < nargs[num]; ++i) {
3700         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3701             return -TARGET_EFAULT;
3702         }
3703     }
3704     /* now that we have the args, invoke the appropriate underlying function */
3705     switch (num) {
3706     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3707         return do_socket(a[0], a[1], a[2]);
3708     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3709         return do_bind(a[0], a[1], a[2]);
3710     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3711         return do_connect(a[0], a[1], a[2]);
3712     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3713         return get_errno(listen(a[0], a[1]));
3714     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3715         return do_accept4(a[0], a[1], a[2], 0);
3716     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3717         return do_getsockname(a[0], a[1], a[2]);
3718     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3719         return do_getpeername(a[0], a[1], a[2]);
3720     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3721         return do_socketpair(a[0], a[1], a[2], a[3]);
3722     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3723         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3724     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3725         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3726     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3727         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3728     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3729         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3730     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3731         return get_errno(shutdown(a[0], a[1]));
3732     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3733         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3734     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3735         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3736     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3737         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3738     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3739         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3740     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3741         return do_accept4(a[0], a[1], a[2], a[3]);
3742     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3743         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3744     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3745         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3746     default:
3747         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3748         return -TARGET_EINVAL;
3749     }
3750 }
3751 #endif
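
/*
 * Worked example (hypothetical guest call, added for illustration): a
 * guest invoking socketcall(TARGET_SYS_BIND, ptr) stores three abi_longs
 * at ptr.  nargs[TARGET_SYS_BIND] is 3, so the loop above fetches
 * a[0] = sockfd, a[1] = guest sockaddr address and a[2] = addrlen, and
 * the switch dispatches to do_bind(a[0], a[1], a[2]).
 */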
3752 
3753 #define N_SHM_REGIONS	32
3754 
3755 static struct shm_region {
3756     abi_ulong start;
3757     abi_ulong size;
3758     bool in_use;
3759 } shm_regions[N_SHM_REGIONS];
3760 
3761 #ifndef TARGET_SEMID64_DS
3762 /* asm-generic version of this struct */
3763 struct target_semid64_ds
3764 {
3765     struct target_ipc_perm sem_perm;
3766     abi_ulong sem_otime;
3767 #if TARGET_ABI_BITS == 32
3768     abi_ulong __unused1;
3769 #endif
3770     abi_ulong sem_ctime;
3771 #if TARGET_ABI_BITS == 32
3772     abi_ulong __unused2;
3773 #endif
3774     abi_ulong sem_nsems;
3775     abi_ulong __unused3;
3776     abi_ulong __unused4;
3777 };
3778 #endif
3779 
3780 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3781                                                abi_ulong target_addr)
3782 {
3783     struct target_ipc_perm *target_ip;
3784     struct target_semid64_ds *target_sd;
3785 
3786     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3787         return -TARGET_EFAULT;
3788     target_ip = &(target_sd->sem_perm);
3789     host_ip->__key = tswap32(target_ip->__key);
3790     host_ip->uid = tswap32(target_ip->uid);
3791     host_ip->gid = tswap32(target_ip->gid);
3792     host_ip->cuid = tswap32(target_ip->cuid);
3793     host_ip->cgid = tswap32(target_ip->cgid);
3794 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3795     host_ip->mode = tswap32(target_ip->mode);
3796 #else
3797     host_ip->mode = tswap16(target_ip->mode);
3798 #endif
3799 #if defined(TARGET_PPC)
3800     host_ip->__seq = tswap32(target_ip->__seq);
3801 #else
3802     host_ip->__seq = tswap16(target_ip->__seq);
3803 #endif
3804     unlock_user_struct(target_sd, target_addr, 0);
3805     return 0;
3806 }
3807 
3808 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3809                                                struct ipc_perm *host_ip)
3810 {
3811     struct target_ipc_perm *target_ip;
3812     struct target_semid64_ds *target_sd;
3813 
3814     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3815         return -TARGET_EFAULT;
3816     target_ip = &(target_sd->sem_perm);
3817     target_ip->__key = tswap32(host_ip->__key);
3818     target_ip->uid = tswap32(host_ip->uid);
3819     target_ip->gid = tswap32(host_ip->gid);
3820     target_ip->cuid = tswap32(host_ip->cuid);
3821     target_ip->cgid = tswap32(host_ip->cgid);
3822 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3823     target_ip->mode = tswap32(host_ip->mode);
3824 #else
3825     target_ip->mode = tswap16(host_ip->mode);
3826 #endif
3827 #if defined(TARGET_PPC)
3828     target_ip->__seq = tswap32(host_ip->__seq);
3829 #else
3830     target_ip->__seq = tswap16(host_ip->__seq);
3831 #endif
3832     unlock_user_struct(target_sd, target_addr, 1);
3833     return 0;
3834 }
3835 
3836 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3837                                                abi_ulong target_addr)
3838 {
3839     struct target_semid64_ds *target_sd;
3840 
3841     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3842         return -TARGET_EFAULT;
3843     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3844         return -TARGET_EFAULT;
3845     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3846     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3847     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3848     unlock_user_struct(target_sd, target_addr, 0);
3849     return 0;
3850 }
3851 
3852 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3853                                                struct semid_ds *host_sd)
3854 {
3855     struct target_semid64_ds *target_sd;
3856 
3857     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3858         return -TARGET_EFAULT;
3859     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3860         return -TARGET_EFAULT;
3861     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3862     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3863     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3864     unlock_user_struct(target_sd, target_addr, 1);
3865     return 0;
3866 }
3867 
3868 struct target_seminfo {
3869     int semmap;
3870     int semmni;
3871     int semmns;
3872     int semmnu;
3873     int semmsl;
3874     int semopm;
3875     int semume;
3876     int semusz;
3877     int semvmx;
3878     int semaem;
3879 };
3880 
3881 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3882                                               struct seminfo *host_seminfo)
3883 {
3884     struct target_seminfo *target_seminfo;
3885     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3886         return -TARGET_EFAULT;
3887     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3888     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3889     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3890     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3891     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3892     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3893     __put_user(host_seminfo->semume, &target_seminfo->semume);
3894     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3895     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3896     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3897     unlock_user_struct(target_seminfo, target_addr, 1);
3898     return 0;
3899 }
3900 
3901 union semun {
3902     int val;
3903     struct semid_ds *buf;
3904     unsigned short *array;
3905     struct seminfo *__buf;
3906 };
3907 
3908 union target_semun {
3909     int val;
3910     abi_ulong buf;
3911     abi_ulong array;
3912     abi_ulong __buf;
3913 };
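
/*
 * Note added for clarity: in target_semun the buf/array/__buf members are
 * guest addresses (abi_ulong), not host pointers, so do_semctl() below
 * must translate them (via target_to_host_semarray(),
 * target_to_host_semid_ds(), etc.) before building the host union semun.
 */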
3914 
3915 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3916                                                abi_ulong target_addr)
3917 {
3918     int nsems;
3919     unsigned short *array;
3920     union semun semun;
3921     struct semid_ds semid_ds;
3922     int i, ret;
3923 
3924     semun.buf = &semid_ds;
3925 
3926     ret = semctl(semid, 0, IPC_STAT, semun);
3927     if (ret == -1)
3928         return get_errno(ret);
3929 
3930     nsems = semid_ds.sem_nsems;
3931 
3932     *host_array = g_try_new(unsigned short, nsems);
3933     if (!*host_array) {
3934         return -TARGET_ENOMEM;
3935     }
3936     array = lock_user(VERIFY_READ, target_addr,
3937                       nsems*sizeof(unsigned short), 1);
3938     if (!array) {
3939         g_free(*host_array);
3940         return -TARGET_EFAULT;
3941     }
3942 
3943     for (i = 0; i < nsems; i++) {
3944         __get_user((*host_array)[i], &array[i]);
3945     }
3946     unlock_user(array, target_addr, 0);
3947 
3948     return 0;
3949 }
3950 
3951 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3952                                                unsigned short **host_array)
3953 {
3954     int nsems;
3955     unsigned short *array;
3956     union semun semun;
3957     struct semid_ds semid_ds;
3958     int i, ret;
3959 
3960     semun.buf = &semid_ds;
3961 
3962     ret = semctl(semid, 0, IPC_STAT, semun);
3963     if (ret == -1)
3964         return get_errno(ret);
3965 
3966     nsems = semid_ds.sem_nsems;
3967 
3968     array = lock_user(VERIFY_WRITE, target_addr,
3969                       nsems*sizeof(unsigned short), 0);
3970     if (!array)
3971         return -TARGET_EFAULT;
3972 
3973     for (i = 0; i < nsems; i++) {
3974         __put_user((*host_array)[i], &array[i]);
3975     }
3976     g_free(*host_array);
3977     unlock_user(array, target_addr, 1);
3978 
3979     return 0;
3980 }
3981 
3982 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3983                                  abi_ulong target_arg)
3984 {
3985     union target_semun target_su = { .buf = target_arg };
3986     union semun arg;
3987     struct semid_ds dsarg;
3988     unsigned short *array = NULL;
3989     struct seminfo seminfo;
3990     abi_long ret = -TARGET_EINVAL;
3991     abi_long err;
3992     cmd &= 0xff;
3993 
3994     switch (cmd) {
3995     case GETVAL:
3996     case SETVAL:
3997         /* In 64 bit cross-endian situations, we will erroneously pick up
3998          * the wrong half of the union for the "val" element.  To rectify
3999          * this, the entire 8-byte structure is byteswapped, followed by
4000          * a swap of the 4 byte val field. In other cases, the data is
4001          * already in proper host byte order. */
4002         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4003             target_su.buf = tswapal(target_su.buf);
4004             arg.val = tswap32(target_su.val);
4005         } else {
4006             arg.val = target_su.val;
4007         }
4008         ret = get_errno(semctl(semid, semnum, cmd, arg));
4009         break;
4010     case GETALL:
4011     case SETALL:
4012         err = target_to_host_semarray(semid, &array, target_su.array);
4013         if (err)
4014             return err;
4015         arg.array = array;
4016         ret = get_errno(semctl(semid, semnum, cmd, arg));
4017         err = host_to_target_semarray(semid, target_su.array, &array);
4018         if (err)
4019             return err;
4020         break;
4021     case IPC_STAT:
4022     case IPC_SET:
4023     case SEM_STAT:
4024         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4025         if (err)
4026             return err;
4027         arg.buf = &dsarg;
4028         ret = get_errno(semctl(semid, semnum, cmd, arg));
4029         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4030         if (err)
4031             return err;
4032         break;
4033     case IPC_INFO:
4034     case SEM_INFO:
4035         arg.__buf = &seminfo;
4036         ret = get_errno(semctl(semid, semnum, cmd, arg));
4037         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4038         if (err)
4039             return err;
4040         break;
4041     case IPC_RMID:
4042     case GETPID:
4043     case GETNCNT:
4044     case GETZCNT:
4045         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4046         break;
4047     }
4048 
4049     return ret;
4050 }
4051 
4052 struct target_sembuf {
4053     unsigned short sem_num;
4054     short sem_op;
4055     short sem_flg;
4056 };
4057 
4058 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4059                                              abi_ulong target_addr,
4060                                              unsigned nsops)
4061 {
4062     struct target_sembuf *target_sembuf;
4063     int i;
4064 
4065     target_sembuf = lock_user(VERIFY_READ, target_addr,
4066                               nsops*sizeof(struct target_sembuf), 1);
4067     if (!target_sembuf)
4068         return -TARGET_EFAULT;
4069 
4070     for (i = 0; i < nsops; i++) {
4071         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4072         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4073         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4074     }
4075 
4076     unlock_user(target_sembuf, target_addr, 0);
4077 
4078     return 0;
4079 }
4080 
4081 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4082     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4083 
4084 /*
4085  * This macro is required to handle the s390 variant, which passes the
4086  * arguments in a different order from the default.
4087  */
4088 #ifdef __s390x__
4089 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4090   (__nsops), (__timeout), (__sops)
4091 #else
4092 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4093   (__nsops), 0, (__sops), (__timeout)
4094 #endif
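
/*
 * Expansion sketch (illustrative, assuming nsops = 2, sops and pts as in
 * do_semtimedop() below): SEMTIMEDOP_IPC_ARGS(2, sops, pts) expands to
 *   2, pts, sops      on s390x (five-argument sys_ipc), and
 *   2, 0, sops, pts   everywhere else,
 * so the timeout ends up in the slot each ABI expects.
 */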
4095 
4096 static inline abi_long do_semtimedop(int semid,
4097                                      abi_long ptr,
4098                                      unsigned nsops,
4099                                      abi_long timeout, bool time64)
4100 {
4101     struct sembuf *sops;
4102     struct timespec ts, *pts = NULL;
4103     abi_long ret;
4104 
4105     if (timeout) {
4106         pts = &ts;
4107         if (time64) {
4108             if (target_to_host_timespec64(pts, timeout)) {
4109                 return -TARGET_EFAULT;
4110             }
4111         } else {
4112             if (target_to_host_timespec(pts, timeout)) {
4113                 return -TARGET_EFAULT;
4114             }
4115         }
4116     }
4117 
4118     if (nsops > TARGET_SEMOPM) {
4119         return -TARGET_E2BIG;
4120     }
4121 
4122     sops = g_new(struct sembuf, nsops);
4123 
4124     if (target_to_host_sembuf(sops, ptr, nsops)) {
4125         g_free(sops);
4126         return -TARGET_EFAULT;
4127     }
4128 
4129     ret = -TARGET_ENOSYS;
4130 #ifdef __NR_semtimedop
4131     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4132 #endif
4133 #ifdef __NR_ipc
4134     if (ret == -TARGET_ENOSYS) {
4135         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4136                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4137     }
4138 #endif
4139     g_free(sops);
4140     return ret;
4141 }
4142 #endif
4143 
4144 struct target_msqid_ds
4145 {
4146     struct target_ipc_perm msg_perm;
4147     abi_ulong msg_stime;
4148 #if TARGET_ABI_BITS == 32
4149     abi_ulong __unused1;
4150 #endif
4151     abi_ulong msg_rtime;
4152 #if TARGET_ABI_BITS == 32
4153     abi_ulong __unused2;
4154 #endif
4155     abi_ulong msg_ctime;
4156 #if TARGET_ABI_BITS == 32
4157     abi_ulong __unused3;
4158 #endif
4159     abi_ulong __msg_cbytes;
4160     abi_ulong msg_qnum;
4161     abi_ulong msg_qbytes;
4162     abi_ulong msg_lspid;
4163     abi_ulong msg_lrpid;
4164     abi_ulong __unused4;
4165     abi_ulong __unused5;
4166 };
4167 
4168 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4169                                                abi_ulong target_addr)
4170 {
4171     struct target_msqid_ds *target_md;
4172 
4173     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4174         return -TARGET_EFAULT;
4175     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4176         return -TARGET_EFAULT;
4177     host_md->msg_stime = tswapal(target_md->msg_stime);
4178     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4179     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4180     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4181     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4182     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4183     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4184     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4185     unlock_user_struct(target_md, target_addr, 0);
4186     return 0;
4187 }
4188 
4189 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4190                                                struct msqid_ds *host_md)
4191 {
4192     struct target_msqid_ds *target_md;
4193 
4194     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4195         return -TARGET_EFAULT;
4196     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4197         return -TARGET_EFAULT;
4198     target_md->msg_stime = tswapal(host_md->msg_stime);
4199     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4200     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4201     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4202     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4203     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4204     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4205     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4206     unlock_user_struct(target_md, target_addr, 1);
4207     return 0;
4208 }
4209 
4210 struct target_msginfo {
4211     int msgpool;
4212     int msgmap;
4213     int msgmax;
4214     int msgmnb;
4215     int msgmni;
4216     int msgssz;
4217     int msgtql;
4218     unsigned short int msgseg;
4219 };
4220 
4221 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4222                                               struct msginfo *host_msginfo)
4223 {
4224     struct target_msginfo *target_msginfo;
4225     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4226         return -TARGET_EFAULT;
4227     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4228     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4229     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4230     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4231     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4232     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4233     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4234     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4235     unlock_user_struct(target_msginfo, target_addr, 1);
4236     return 0;
4237 }
4238 
4239 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4240 {
4241     struct msqid_ds dsarg;
4242     struct msginfo msginfo;
4243     abi_long ret = -TARGET_EINVAL;
4244 
4245     cmd &= 0xff;
4246 
4247     switch (cmd) {
4248     case IPC_STAT:
4249     case IPC_SET:
4250     case MSG_STAT:
4251         if (target_to_host_msqid_ds(&dsarg,ptr))
4252             return -TARGET_EFAULT;
4253         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4254         if (host_to_target_msqid_ds(ptr,&dsarg))
4255             return -TARGET_EFAULT;
4256         break;
4257     case IPC_RMID:
4258         ret = get_errno(msgctl(msgid, cmd, NULL));
4259         break;
4260     case IPC_INFO:
4261     case MSG_INFO:
4262         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4263         if (host_to_target_msginfo(ptr, &msginfo))
4264             return -TARGET_EFAULT;
4265         break;
4266     }
4267 
4268     return ret;
4269 }
4270 
4271 struct target_msgbuf {
4272     abi_long mtype;
4273     char mtext[1];
4274 };
4275 
4276 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4277                                  ssize_t msgsz, int msgflg)
4278 {
4279     struct target_msgbuf *target_mb;
4280     struct msgbuf *host_mb;
4281     abi_long ret = 0;
4282 
4283     if (msgsz < 0) {
4284         return -TARGET_EINVAL;
4285     }
4286 
4287     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4288         return -TARGET_EFAULT;
4289     host_mb = g_try_malloc(msgsz + sizeof(long));
4290     if (!host_mb) {
4291         unlock_user_struct(target_mb, msgp, 0);
4292         return -TARGET_ENOMEM;
4293     }
4294     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4295     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4296     ret = -TARGET_ENOSYS;
4297 #ifdef __NR_msgsnd
4298     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4299 #endif
4300 #ifdef __NR_ipc
4301     if (ret == -TARGET_ENOSYS) {
4302 #ifdef __s390x__
4303         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4304                                  host_mb));
4305 #else
4306         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4307                                  host_mb, 0));
4308 #endif
4309     }
4310 #endif
4311     g_free(host_mb);
4312     unlock_user_struct(target_mb, msgp, 0);
4313 
4314     return ret;
4315 }
4316 
4317 #ifdef __NR_ipc
4318 #if defined(__sparc__)
4319 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4320 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4321 #elif defined(__s390x__)
4322 /* The s390 sys_ipc variant has only five parameters.  */
4323 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4324     ((long int[]){(long int)__msgp, __msgtyp})
4325 #else
4326 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4327     ((long int[]){(long int)__msgp, __msgtyp}), 0
4328 #endif
4329 #endif
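
/*
 * Expansion sketch (illustrative): MSGRCV_ARGS(host_mb, msgtyp) expands to
 *   host_mb, msgtyp                               on SPARC,
 *   ((long int[]){(long int)host_mb, msgtyp})     on s390x, and
 *   ((long int[]){(long int)host_mb, msgtyp}), 0  on other hosts,
 * matching the per-architecture sys_ipc calling conventions used by
 * do_msgrcv() below.
 */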
4330 
4331 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4332                                  ssize_t msgsz, abi_long msgtyp,
4333                                  int msgflg)
4334 {
4335     struct target_msgbuf *target_mb;
4336     char *target_mtext;
4337     struct msgbuf *host_mb;
4338     abi_long ret = 0;
4339 
4340     if (msgsz < 0) {
4341         return -TARGET_EINVAL;
4342     }
4343 
4344     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4345         return -TARGET_EFAULT;
4346 
4347     host_mb = g_try_malloc(msgsz + sizeof(long));
4348     if (!host_mb) {
4349         ret = -TARGET_ENOMEM;
4350         goto end;
4351     }
4352     ret = -TARGET_ENOSYS;
4353 #ifdef __NR_msgrcv
4354     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4355 #endif
4356 #ifdef __NR_ipc
4357     if (ret == -TARGET_ENOSYS) {
4358         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4359                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4360     }
4361 #endif
4362 
4363     if (ret > 0) {
4364         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4365         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4366         if (!target_mtext) {
4367             ret = -TARGET_EFAULT;
4368             goto end;
4369         }
4370         memcpy(target_mb->mtext, host_mb->mtext, ret);
4371         unlock_user(target_mtext, target_mtext_addr, ret);
4372     }
4373 
4374     target_mb->mtype = tswapal(host_mb->mtype);
4375 
4376 end:
4377     if (target_mb)
4378         unlock_user_struct(target_mb, msgp, 1);
4379     g_free(host_mb);
4380     return ret;
4381 }
4382 
4383 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4384                                                abi_ulong target_addr)
4385 {
4386     struct target_shmid_ds *target_sd;
4387 
4388     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4389         return -TARGET_EFAULT;
4390     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4391         return -TARGET_EFAULT;
4392     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4393     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4394     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4395     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4396     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4397     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4398     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4399     unlock_user_struct(target_sd, target_addr, 0);
4400     return 0;
4401 }
4402 
4403 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4404                                                struct shmid_ds *host_sd)
4405 {
4406     struct target_shmid_ds *target_sd;
4407 
4408     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4409         return -TARGET_EFAULT;
4410     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4411         return -TARGET_EFAULT;
4412     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4413     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4414     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4415     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4416     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4417     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4418     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4419     unlock_user_struct(target_sd, target_addr, 1);
4420     return 0;
4421 }
4422 
4423 struct  target_shminfo {
4424     abi_ulong shmmax;
4425     abi_ulong shmmin;
4426     abi_ulong shmmni;
4427     abi_ulong shmseg;
4428     abi_ulong shmall;
4429 };
4430 
4431 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4432                                               struct shminfo *host_shminfo)
4433 {
4434     struct target_shminfo *target_shminfo;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4438     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4439     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4440     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4441     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4442     unlock_user_struct(target_shminfo, target_addr, 1);
4443     return 0;
4444 }
4445 
4446 struct target_shm_info {
4447     int used_ids;
4448     abi_ulong shm_tot;
4449     abi_ulong shm_rss;
4450     abi_ulong shm_swp;
4451     abi_ulong swap_attempts;
4452     abi_ulong swap_successes;
4453 };
4454 
4455 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4456                                                struct shm_info *host_shm_info)
4457 {
4458     struct target_shm_info *target_shm_info;
4459     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4460         return -TARGET_EFAULT;
4461     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4462     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4463     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4464     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4465     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4466     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4467     unlock_user_struct(target_shm_info, target_addr, 1);
4468     return 0;
4469 }
4470 
4471 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4472 {
4473     struct shmid_ds dsarg;
4474     struct shminfo shminfo;
4475     struct shm_info shm_info;
4476     abi_long ret = -TARGET_EINVAL;
4477 
4478     cmd &= 0xff;
4479 
4480     switch(cmd) {
4481     case IPC_STAT:
4482     case IPC_SET:
4483     case SHM_STAT:
4484         if (target_to_host_shmid_ds(&dsarg, buf))
4485             return -TARGET_EFAULT;
4486         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4487         if (host_to_target_shmid_ds(buf, &dsarg))
4488             return -TARGET_EFAULT;
4489         break;
4490     case IPC_INFO:
4491         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4492         if (host_to_target_shminfo(buf, &shminfo))
4493             return -TARGET_EFAULT;
4494         break;
4495     case SHM_INFO:
4496         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4497         if (host_to_target_shm_info(buf, &shm_info))
4498             return -TARGET_EFAULT;
4499         break;
4500     case IPC_RMID:
4501     case SHM_LOCK:
4502     case SHM_UNLOCK:
4503         ret = get_errno(shmctl(shmid, cmd, NULL));
4504         break;
4505     }
4506 
4507     return ret;
4508 }
4509 
4510 #ifndef TARGET_FORCE_SHMLBA
4511 /* For most architectures, SHMLBA is the same as the page size;
4512  * some architectures have larger values, in which case they should
4513  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4514  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4515  * and defining its own value for SHMLBA.
4516  *
4517  * The kernel also permits SHMLBA to be set by the architecture to a
4518  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4519  * this means that addresses are rounded to the large size if
4520  * SHM_RND is set but addresses not aligned to that size are not rejected
4521  * as long as they are at least page-aligned. Since the only architecture
4522  * which uses this is ia64 this code doesn't provide for that oddity.
4523  * which uses this is ia64, this code doesn't provide for that oddity.
4524 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4525 {
4526     return TARGET_PAGE_SIZE;
4527 }
4528 #endif
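
/*
 * Sketch of the alternative path (illustrative only, not the actual
 * per-target code): an architecture whose ABI needs a larger SHMLBA,
 * e.g. several pages for cache-aliasing reasons, would provide something
 * like the following in its target headers instead:
 *
 *     #define TARGET_FORCE_SHMLBA
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * in which case the generic definition above is compiled out.
 */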
4529 
4530 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4531                                  int shmid, abi_ulong shmaddr, int shmflg)
4532 {
4533     CPUState *cpu = env_cpu(cpu_env);
4534     abi_long raddr;
4535     void *host_raddr;
4536     struct shmid_ds shm_info;
4537     int i, ret;
4538     abi_ulong shmlba;
4539 
4540     /* shmat pointers are always untagged */
4541 
4542     /* find out the length of the shared memory segment */
4543     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4544     if (is_error(ret)) {
4545         /* can't get length, bail out */
4546         return ret;
4547     }
4548 
4549     shmlba = target_shmlba(cpu_env);
4550 
4551     if (shmaddr & (shmlba - 1)) {
4552         if (shmflg & SHM_RND) {
4553             shmaddr &= ~(shmlba - 1);
4554         } else {
4555             return -TARGET_EINVAL;
4556         }
4557     }
4558     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4559         return -TARGET_EINVAL;
4560     }
4561 
4562     mmap_lock();
4563 
4564     /*
4565      * We're mapping shared memory, so ensure we generate code for parallel
4566      * execution and flush old translations.  This will work up to the level
4567      * supported by the host -- anything that requires EXCP_ATOMIC will not
4568      * be atomic with respect to an external process.
4569      */
4570     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4571         cpu->tcg_cflags |= CF_PARALLEL;
4572         tb_flush(cpu);
4573     }
4574 
4575     if (shmaddr) {
4576         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4577     } else {
4578         abi_ulong mmap_start;
4579 
4580         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4581         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4582 
4583         if (mmap_start == -1) {
4584             errno = ENOMEM;
4585             host_raddr = (void *)-1;
4586         } else
4587             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4588                                shmflg | SHM_REMAP);
4589     }
4590 
4591     if (host_raddr == (void *)-1) {
4592         mmap_unlock();
4593         return get_errno((long)host_raddr);
4594     }
4595     raddr=h2g((unsigned long)host_raddr);
4596     raddr = h2g((unsigned long)host_raddr);
4597     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4598                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4599                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4600 
4601     for (i = 0; i < N_SHM_REGIONS; i++) {
4602         if (!shm_regions[i].in_use) {
4603             shm_regions[i].in_use = true;
4604             shm_regions[i].start = raddr;
4605             shm_regions[i].size = shm_info.shm_segsz;
4606             break;
4607         }
4608     }
4609 
4610     mmap_unlock();
4611     return raddr;
4612 
4613 }
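
/*
 * Worked example of the alignment handling above (illustrative numbers):
 * with shmlba = 0x1000, a request for shmaddr = 0x40001234 is rounded
 * down to 0x40001000 when SHM_RND is set and rejected with
 * -TARGET_EINVAL otherwise.  A shmaddr of 0 lets QEMU pick a suitable
 * guest range via mmap_find_vma() before calling the host shmat().
 */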
4614 
4615 static inline abi_long do_shmdt(abi_ulong shmaddr)
4616 {
4617     int i;
4618     abi_long rv;
4619 
4620     /* shmdt pointers are always untagged */
4621 
4622     mmap_lock();
4623 
4624     for (i = 0; i < N_SHM_REGIONS; ++i) {
4625         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4626             shm_regions[i].in_use = false;
4627             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4628             break;
4629         }
4630     }
4631     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4632 
4633     mmap_unlock();
4634 
4635     return rv;
4636 }
4637 
4638 #ifdef TARGET_NR_ipc
4639 /* ??? This only works with linear mappings.  */
4640 /* do_ipc() must return target values and target errnos. */
4641 static abi_long do_ipc(CPUArchState *cpu_env,
4642                        unsigned int call, abi_long first,
4643                        abi_long second, abi_long third,
4644                        abi_long ptr, abi_long fifth)
4645 {
4646     int version;
4647     abi_long ret = 0;
4648 
4649     version = call >> 16;
4650     call &= 0xffff;
4651 
4652     switch (call) {
4653     case IPCOP_semop:
4654         ret = do_semtimedop(first, ptr, second, 0, false);
4655         break;
4656     case IPCOP_semtimedop:
4657     /*
4658      * The s390 sys_ipc variant has only five parameters instead of six
4659      * (as in the default variant); the only difference is the handling of
4660      * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4661      * the third parameter while the generic variant uses the fifth.
4662      */
4663 #if defined(TARGET_S390X)
4664         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4665 #else
4666         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4667 #endif
4668         break;
4669 
4670     case IPCOP_semget:
4671         ret = get_errno(semget(first, second, third));
4672         break;
4673 
4674     case IPCOP_semctl: {
4675         /* The semun argument to semctl is passed by value, so dereference the
4676          * ptr argument. */
4677         abi_ulong atptr;
4678         get_user_ual(atptr, ptr);
4679         ret = do_semctl(first, second, third, atptr);
4680         break;
4681     }
4682 
4683     case IPCOP_msgget:
4684         ret = get_errno(msgget(first, second));
4685         break;
4686 
4687     case IPCOP_msgsnd:
4688         ret = do_msgsnd(first, ptr, second, third);
4689         break;
4690 
4691     case IPCOP_msgctl:
4692         ret = do_msgctl(first, second, ptr);
4693         break;
4694 
4695     case IPCOP_msgrcv:
4696         switch (version) {
4697         case 0:
4698             {
4699                 struct target_ipc_kludge {
4700                     abi_long msgp;
4701                     abi_long msgtyp;
4702                 } *tmp;
4703 
4704                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4705                     ret = -TARGET_EFAULT;
4706                     break;
4707                 }
4708 
4709                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4710 
4711                 unlock_user_struct(tmp, ptr, 0);
4712                 break;
4713             }
4714         default:
4715             ret = do_msgrcv(first, ptr, second, fifth, third);
4716         }
4717         break;
4718 
4719     case IPCOP_shmat:
4720         switch (version) {
4721         default:
4722         {
4723             abi_ulong raddr;
4724             raddr = do_shmat(cpu_env, first, ptr, second);
4725             if (is_error(raddr))
4726                 return get_errno(raddr);
4727             if (put_user_ual(raddr, third))
4728                 return -TARGET_EFAULT;
4729             break;
4730         }
4731         case 1:
4732             ret = -TARGET_EINVAL;
4733             break;
4734         }
4735         break;
4736     case IPCOP_shmdt:
4737         ret = do_shmdt(ptr);
4738         break;
4739 
4740     case IPCOP_shmget:
4741         /* IPC_* flag values are the same on all Linux platforms */
4742         ret = get_errno(shmget(first, second, third));
4743         break;
4744 
4745     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4746     case IPCOP_shmctl:
4747         ret = do_shmctl(first, second, ptr);
4748         break;
4749     default:
4750         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4751                       call, version);
4752         ret = -TARGET_ENOSYS;
4753         break;
4754     }
4755     return ret;
4756 }
4757 #endif
4758 
4759 /* kernel structure types definitions */
4760 
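     /*
      * syscall_types.h is included twice with different STRUCT() definitions:
      * the first pass builds an enum of STRUCT_* identifiers, the second pass
      * emits an argtype descriptor array for each structure.  These descriptors
      * drive the thunk code that converts ioctl arguments between target and
      * host layouts.  STRUCT_SPECIAL entries get no descriptor and are handled
      * by hand-written converters.
      */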
4761 #define STRUCT(name, ...) STRUCT_ ## name,
4762 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4763 enum {
4764 #include "syscall_types.h"
4765 STRUCT_MAX
4766 };
4767 #undef STRUCT
4768 #undef STRUCT_SPECIAL
4769 
4770 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4771 #define STRUCT_SPECIAL(name)
4772 #include "syscall_types.h"
4773 #undef STRUCT
4774 #undef STRUCT_SPECIAL
4775 
4776 #define MAX_STRUCT_SIZE 4096
4777 
4778 #ifdef CONFIG_FIEMAP
4779 /* So fiemap access checks don't overflow on 32-bit systems.
4780  * This is very slightly smaller than the limit imposed by
4781  * the underlying kernel.
4782  */
4783 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4784                             / sizeof(struct fiemap_extent))
4785 
4786 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4787                                        int fd, int cmd, abi_long arg)
4788 {
4789     /* The parameter for this ioctl is a struct fiemap followed
4790      * by an array of struct fiemap_extent whose size is set
4791      * in fiemap->fm_extent_count. The array is filled in by the
4792      * ioctl.
4793      */
4794     int target_size_in, target_size_out;
4795     struct fiemap *fm;
4796     const argtype *arg_type = ie->arg_type;
4797     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4798     void *argptr, *p;
4799     abi_long ret;
4800     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4801     uint32_t outbufsz;
4802     int free_fm = 0;
4803 
4804     assert(arg_type[0] == TYPE_PTR);
4805     assert(ie->access == IOC_RW);
4806     arg_type++;
4807     target_size_in = thunk_type_size(arg_type, 0);
4808     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4809     if (!argptr) {
4810         return -TARGET_EFAULT;
4811     }
4812     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4813     unlock_user(argptr, arg, 0);
4814     fm = (struct fiemap *)buf_temp;
4815     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4816         return -TARGET_EINVAL;
4817     }
4818 
4819     outbufsz = sizeof(*fm) +
4820         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4821 
4822     if (outbufsz > MAX_STRUCT_SIZE) {
4823         /* We can't fit all the extents into the fixed size buffer.
4824          * Allocate one that is large enough and use it instead.
4825          */
4826         fm = g_try_malloc(outbufsz);
4827         if (!fm) {
4828             return -TARGET_ENOMEM;
4829         }
4830         memcpy(fm, buf_temp, sizeof(struct fiemap));
4831         free_fm = 1;
4832     }
4833     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4834     if (!is_error(ret)) {
4835         target_size_out = target_size_in;
4836         /* An extent_count of 0 means we were only counting the extents
4837          * so there are no structs to copy
4838          */
4839         if (fm->fm_extent_count != 0) {
4840             target_size_out += fm->fm_mapped_extents * extent_size;
4841         }
4842         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4843         if (!argptr) {
4844             ret = -TARGET_EFAULT;
4845         } else {
4846             /* Convert the struct fiemap */
4847             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4848             if (fm->fm_extent_count != 0) {
4849                 p = argptr + target_size_in;
4850                 /* ...and then all the struct fiemap_extents */
4851                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4852                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4853                                   THUNK_TARGET);
4854                     p += extent_size;
4855                 }
4856             }
4857             unlock_user(argptr, arg, target_size_out);
4858         }
4859     }
4860     if (free_fm) {
4861         g_free(fm);
4862     }
4863     return ret;
4864 }
4865 #endif
4866 
4867 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4868                                 int fd, int cmd, abi_long arg)
4869 {
4870     const argtype *arg_type = ie->arg_type;
4871     int target_size;
4872     void *argptr;
4873     int ret;
4874     struct ifconf *host_ifconf;
4875     uint32_t outbufsz;
4876     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4877     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4878     int target_ifreq_size;
4879     int nb_ifreq;
4880     int free_buf = 0;
4881     int i;
4882     int target_ifc_len;
4883     abi_long target_ifc_buf;
4884     int host_ifc_len;
4885     char *host_ifc_buf;
4886 
4887     assert(arg_type[0] == TYPE_PTR);
4888     assert(ie->access == IOC_RW);
4889 
4890     arg_type++;
4891     target_size = thunk_type_size(arg_type, 0);
4892 
4893     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4894     if (!argptr)
4895         return -TARGET_EFAULT;
4896     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4897     unlock_user(argptr, arg, 0);
4898 
4899     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4900     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4901     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4902 
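         /*
          * The target and host struct ifreq layouts can differ in size, so
          * ifc_len has to be rescaled between the two representations here
          * and again after the ioctl.
          */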
4903     if (target_ifc_buf != 0) {
4904         target_ifc_len = host_ifconf->ifc_len;
4905         nb_ifreq = target_ifc_len / target_ifreq_size;
4906         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4907 
4908         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4909         if (outbufsz > MAX_STRUCT_SIZE) {
4910             /*
4911              * We can't fit all the ifreq entries into the fixed size buffer.
4912              * Allocate one that is large enough and use it instead.
4913              */
4914             host_ifconf = g_try_malloc(outbufsz);
4915             if (!host_ifconf) {
4916                 return -TARGET_ENOMEM;
4917             }
4918             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4919             free_buf = 1;
4920         }
4921         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4922 
4923         host_ifconf->ifc_len = host_ifc_len;
4924     } else {
4925         host_ifc_buf = NULL;
4926     }
4927     host_ifconf->ifc_buf = host_ifc_buf;
4928 
4929     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4930     if (!is_error(ret)) {
4931         /* convert host ifc_len to target ifc_len */
4932 
4933         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4934         target_ifc_len = nb_ifreq * target_ifreq_size;
4935         host_ifconf->ifc_len = target_ifc_len;
4936 
4937         /* restore target ifc_buf */
4938 
4939         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4940 
4941         /* copy struct ifconf to target user */
4942 
4943         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4944         if (!argptr)
4945             return -TARGET_EFAULT;
4946         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4947         unlock_user(argptr, arg, target_size);
4948 
4949         if (target_ifc_buf != 0) {
4950             /* copy ifreq[] to target user */
4951             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4952             for (i = 0; i < nb_ifreq ; i++) {
4953                 thunk_convert(argptr + i * target_ifreq_size,
4954                               host_ifc_buf + i * sizeof(struct ifreq),
4955                               ifreq_arg_type, THUNK_TARGET);
4956             }
4957             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4958         }
4959     }
4960 
4961     if (free_buf) {
4962         g_free(host_ifconf);
4963     }
4964 
4965     return ret;
4966 }
4967 
4968 #if defined(CONFIG_USBFS)
4969 #if HOST_LONG_BITS > 64
4970 #error USBDEVFS thunks do not support >64 bit hosts yet.
4971 #endif
4972 struct live_urb {
4973     uint64_t target_urb_adr;
4974     uint64_t target_buf_adr;
4975     char *target_buf_ptr;
4976     struct usbdevfs_urb host_urb;
4977 };
4978 
4979 static GHashTable *usbdevfs_urb_hashtable(void)
4980 {
4981     static GHashTable *urb_hashtable;
4982 
4983     if (!urb_hashtable) {
4984         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4985     }
4986     return urb_hashtable;
4987 }
4988 
4989 static void urb_hashtable_insert(struct live_urb *urb)
4990 {
4991     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4992     g_hash_table_insert(urb_hashtable, urb, urb);
4993 }
4994 
4995 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4996 {
4997     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4998     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4999 }
5000 
5001 static void urb_hashtable_remove(struct live_urb *urb)
5002 {
5003     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5004     g_hash_table_remove(urb_hashtable, urb);
5005 }
5006 
5007 static abi_long
5008 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
5009                           int fd, int cmd, abi_long arg)
5010 {
5011     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5012     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5013     struct live_urb *lurb;
5014     void *argptr;
5015     uint64_t hurb;
5016     int target_size;
5017     uintptr_t target_urb_adr;
5018     abi_long ret;
5019 
5020     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5021 
5022     memset(buf_temp, 0, sizeof(uint64_t));
5023     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5024     if (is_error(ret)) {
5025         return ret;
5026     }
5027 
5028     memcpy(&hurb, buf_temp, sizeof(uint64_t));
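         /*
          * The kernel returned a pointer to the embedded host_urb; step back
          * to the enclosing live_urb (container_of style) to recover the
          * target-side metadata.
          */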
5029     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5030     if (!lurb->target_urb_adr) {
5031         return -TARGET_EFAULT;
5032     }
5033     urb_hashtable_remove(lurb);
5034     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5035         lurb->host_urb.buffer_length);
5036     lurb->target_buf_ptr = NULL;
5037 
5038     /* restore the guest buffer pointer */
5039     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5040 
5041     /* update the guest urb struct */
5042     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5043     if (!argptr) {
5044         g_free(lurb);
5045         return -TARGET_EFAULT;
5046     }
5047     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5048     unlock_user(argptr, lurb->target_urb_adr, target_size);
5049 
5050     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5051     /* write back the urb handle */
5052     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5053     if (!argptr) {
5054         g_free(lurb);
5055         return -TARGET_EFAULT;
5056     }
5057 
5058     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5059     target_urb_adr = lurb->target_urb_adr;
5060     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5061     unlock_user(argptr, arg, target_size);
5062 
5063     g_free(lurb);
5064     return ret;
5065 }
5066 
5067 static abi_long
5068 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5069                              uint8_t *buf_temp __attribute__((unused)),
5070                              int fd, int cmd, abi_long arg)
5071 {
5072     struct live_urb *lurb;
5073 
5074     /* map target address back to host URB with metadata. */
5075     lurb = urb_hashtable_lookup(arg);
5076     if (!lurb) {
5077         return -TARGET_EFAULT;
5078     }
5079     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5080 }
5081 
5082 static abi_long
5083 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5084                             int fd, int cmd, abi_long arg)
5085 {
5086     const argtype *arg_type = ie->arg_type;
5087     int target_size;
5088     abi_long ret;
5089     void *argptr;
5090     int rw_dir;
5091     struct live_urb *lurb;
5092 
5093     /*
5094      * Each submitted URB needs to map to a unique ID for the
5095      * kernel, and that unique ID needs to be a pointer to
5096      * host memory.  Hence, we need to allocate one live_urb per URB.
5097      * Isochronous transfers have a variable-length struct.
5098      */
5099     arg_type++;
5100     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5101 
5102     /* construct host copy of urb and metadata */
5103     lurb = g_try_new0(struct live_urb, 1);
5104     if (!lurb) {
5105         return -TARGET_ENOMEM;
5106     }
5107 
5108     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5109     if (!argptr) {
5110         g_free(lurb);
5111         return -TARGET_EFAULT;
5112     }
5113     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5114     unlock_user(argptr, arg, 0);
5115 
5116     lurb->target_urb_adr = arg;
5117     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5118 
5119     /* buffer space used depends on endpoint type so lock the entire buffer */
5120     /* control type urbs should check the buffer contents for true direction */
5121     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5122     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5123         lurb->host_urb.buffer_length, 1);
5124     if (lurb->target_buf_ptr == NULL) {
5125         g_free(lurb);
5126         return -TARGET_EFAULT;
5127     }
5128 
5129     /* update buffer pointer in host copy */
5130     lurb->host_urb.buffer = lurb->target_buf_ptr;
5131 
5132     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5133     if (is_error(ret)) {
5134         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5135         g_free(lurb);
5136     } else {
5137         urb_hashtable_insert(lurb);
5138     }
5139 
5140     return ret;
5141 }
5142 #endif /* CONFIG_USBFS */
5143 
5144 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5145                             int cmd, abi_long arg)
5146 {
5147     void *argptr;
5148     struct dm_ioctl *host_dm;
5149     abi_long guest_data;
5150     uint32_t guest_data_size;
5151     int target_size;
5152     const argtype *arg_type = ie->arg_type;
5153     abi_long ret;
5154     void *big_buf = NULL;
5155     char *host_data;
5156 
5157     arg_type++;
5158     target_size = thunk_type_size(arg_type, 0);
5159     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5160     if (!argptr) {
5161         ret = -TARGET_EFAULT;
5162         goto out;
5163     }
5164     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5165     unlock_user(argptr, arg, 0);
5166 
5167     /* buf_temp is too small, so fetch things into a bigger buffer */
5168     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5169     memcpy(big_buf, buf_temp, target_size);
5170     buf_temp = big_buf;
5171     host_dm = big_buf;
5172 
5173     guest_data = arg + host_dm->data_start;
5174     if ((guest_data - arg) < 0) {
5175         ret = -TARGET_EINVAL;
5176         goto out;
5177     }
5178     guest_data_size = host_dm->data_size - host_dm->data_start;
5179     host_data = (char*)host_dm + host_dm->data_start;
5180 
5181     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5182     if (!argptr) {
5183         ret = -TARGET_EFAULT;
5184         goto out;
5185     }
5186 
5187     switch (ie->host_cmd) {
5188     case DM_REMOVE_ALL:
5189     case DM_LIST_DEVICES:
5190     case DM_DEV_CREATE:
5191     case DM_DEV_REMOVE:
5192     case DM_DEV_SUSPEND:
5193     case DM_DEV_STATUS:
5194     case DM_DEV_WAIT:
5195     case DM_TABLE_STATUS:
5196     case DM_TABLE_CLEAR:
5197     case DM_TABLE_DEPS:
5198     case DM_LIST_VERSIONS:
5199         /* no input data */
5200         break;
5201     case DM_DEV_RENAME:
5202     case DM_DEV_SET_GEOMETRY:
5203         /* data contains only strings */
5204         memcpy(host_data, argptr, guest_data_size);
5205         break;
5206     case DM_TARGET_MSG:
5207         memcpy(host_data, argptr, guest_data_size);
5208         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5209         break;
5210     case DM_TABLE_LOAD:
5211     {
5212         void *gspec = argptr;
5213         void *cur_data = host_data;
5214         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5215         int spec_size = thunk_type_size(arg_type, 0);
5216         int i;
5217 
5218         for (i = 0; i < host_dm->target_count; i++) {
5219             struct dm_target_spec *spec = cur_data;
5220             uint32_t next;
5221             int slen;
5222 
5223             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5224             slen = strlen((char*)gspec + spec_size) + 1;
5225             next = spec->next;
5226             spec->next = sizeof(*spec) + slen;
5227             strcpy((char*)&spec[1], gspec + spec_size);
5228             gspec += next;
5229             cur_data += spec->next;
5230         }
5231         break;
5232     }
5233     default:
5234         ret = -TARGET_EINVAL;
5235         unlock_user(argptr, guest_data, 0);
5236         goto out;
5237     }
5238     unlock_user(argptr, guest_data, 0);
5239 
5240     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5241     if (!is_error(ret)) {
5242         guest_data = arg + host_dm->data_start;
5243         guest_data_size = host_dm->data_size - host_dm->data_start;
5244         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5245         switch (ie->host_cmd) {
5246         case DM_REMOVE_ALL:
5247         case DM_DEV_CREATE:
5248         case DM_DEV_REMOVE:
5249         case DM_DEV_RENAME:
5250         case DM_DEV_SUSPEND:
5251         case DM_DEV_STATUS:
5252         case DM_TABLE_LOAD:
5253         case DM_TABLE_CLEAR:
5254         case DM_TARGET_MSG:
5255         case DM_DEV_SET_GEOMETRY:
5256             /* no return data */
5257             break;
5258         case DM_LIST_DEVICES:
5259         {
5260             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5261             uint32_t remaining_data = guest_data_size;
5262             void *cur_data = argptr;
5263             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5264             int nl_size = 12; /* can't use thunk_size due to alignment */
5265 
5266             while (1) {
5267                 uint32_t next = nl->next;
5268                 if (next) {
5269                     nl->next = nl_size + (strlen(nl->name) + 1);
5270                 }
5271                 if (remaining_data < nl->next) {
5272                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5273                     break;
5274                 }
5275                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5276                 strcpy(cur_data + nl_size, nl->name);
5277                 cur_data += nl->next;
5278                 remaining_data -= nl->next;
5279                 if (!next) {
5280                     break;
5281                 }
5282                 nl = (void*)nl + next;
5283             }
5284             break;
5285         }
5286         case DM_DEV_WAIT:
5287         case DM_TABLE_STATUS:
5288         {
5289             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5290             void *cur_data = argptr;
5291             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5292             int spec_size = thunk_type_size(arg_type, 0);
5293             int i;
5294 
5295             for (i = 0; i < host_dm->target_count; i++) {
5296                 uint32_t next = spec->next;
5297                 int slen = strlen((char*)&spec[1]) + 1;
5298                 spec->next = (cur_data - argptr) + spec_size + slen;
5299                 if (guest_data_size < spec->next) {
5300                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5301                     break;
5302                 }
5303                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5304                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5305                 cur_data = argptr + spec->next;
5306                 spec = (void*)host_dm + host_dm->data_start + next;
5307             }
5308             break;
5309         }
5310         case DM_TABLE_DEPS:
5311         {
5312             void *hdata = (void*)host_dm + host_dm->data_start;
5313             int count = *(uint32_t*)hdata;
5314             uint64_t *hdev = hdata + 8;
5315             uint64_t *gdev = argptr + 8;
5316             int i;
5317 
5318             *(uint32_t*)argptr = tswap32(count);
5319             for (i = 0; i < count; i++) {
5320                 *gdev = tswap64(*hdev);
5321                 gdev++;
5322                 hdev++;
5323             }
5324             break;
5325         }
5326         case DM_LIST_VERSIONS:
5327         {
5328             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5329             uint32_t remaining_data = guest_data_size;
5330             void *cur_data = argptr;
5331             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5332             int vers_size = thunk_type_size(arg_type, 0);
5333 
5334             while (1) {
5335                 uint32_t next = vers->next;
5336                 if (next) {
5337                     vers->next = vers_size + (strlen(vers->name) + 1);
5338                 }
5339                 if (remaining_data < vers->next) {
5340                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5341                     break;
5342                 }
5343                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5344                 strcpy(cur_data + vers_size, vers->name);
5345                 cur_data += vers->next;
5346                 remaining_data -= vers->next;
5347                 if (!next) {
5348                     break;
5349                 }
5350                 vers = (void*)vers + next;
5351             }
5352             break;
5353         }
5354         default:
5355             unlock_user(argptr, guest_data, 0);
5356             ret = -TARGET_EINVAL;
5357             goto out;
5358         }
5359         unlock_user(argptr, guest_data, guest_data_size);
5360 
5361         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5362         if (!argptr) {
5363             ret = -TARGET_EFAULT;
5364             goto out;
5365         }
5366         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5367         unlock_user(argptr, arg, target_size);
5368     }
5369 out:
5370     g_free(big_buf);
5371     return ret;
5372 }
5373 
5374 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5375                                int cmd, abi_long arg)
5376 {
5377     void *argptr;
5378     int target_size;
5379     const argtype *arg_type = ie->arg_type;
5380     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5381     abi_long ret;
5382 
5383     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5384     struct blkpg_partition host_part;
5385 
5386     /* Read and convert blkpg */
5387     arg_type++;
5388     target_size = thunk_type_size(arg_type, 0);
5389     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5390     if (!argptr) {
5391         ret = -TARGET_EFAULT;
5392         goto out;
5393     }
5394     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5395     unlock_user(argptr, arg, 0);
5396 
5397     switch (host_blkpg->op) {
5398     case BLKPG_ADD_PARTITION:
5399     case BLKPG_DEL_PARTITION:
5400         /* payload is struct blkpg_partition */
5401         break;
5402     default:
5403         /* Unknown opcode */
5404         ret = -TARGET_EINVAL;
5405         goto out;
5406     }
5407 
5408     /* Read and convert blkpg->data */
5409     arg = (abi_long)(uintptr_t)host_blkpg->data;
5410     target_size = thunk_type_size(part_arg_type, 0);
5411     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5412     if (!argptr) {
5413         ret = -TARGET_EFAULT;
5414         goto out;
5415     }
5416     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5417     unlock_user(argptr, arg, 0);
5418 
5419     /* Swizzle the data pointer to our local copy and call! */
5420     host_blkpg->data = &host_part;
5421     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5422 
5423 out:
5424     return ret;
5425 }
5426 
5427 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5428                                 int fd, int cmd, abi_long arg)
5429 {
5430     const argtype *arg_type = ie->arg_type;
5431     const StructEntry *se;
5432     const argtype *field_types;
5433     const int *dst_offsets, *src_offsets;
5434     int target_size;
5435     void *argptr;
5436     abi_ulong *target_rt_dev_ptr = NULL;
5437     unsigned long *host_rt_dev_ptr = NULL;
5438     abi_long ret;
5439     int i;
5440 
5441     assert(ie->access == IOC_W);
5442     assert(*arg_type == TYPE_PTR);
5443     arg_type++;
5444     assert(*arg_type == TYPE_STRUCT);
5445     target_size = thunk_type_size(arg_type, 0);
5446     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5447     if (!argptr) {
5448         return -TARGET_EFAULT;
5449     }
5450     arg_type++;
5451     assert(*arg_type == (int)STRUCT_rtentry);
5452     se = struct_entries + *arg_type++;
5453     assert(se->convert[0] == NULL);
5454     /* convert struct here to be able to catch rt_dev string */
5455     field_types = se->field_types;
5456     dst_offsets = se->field_offsets[THUNK_HOST];
5457     src_offsets = se->field_offsets[THUNK_TARGET];
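         /*
          * rt_dev is a guest pointer to a device-name string, which cannot go
          * through the generic thunk conversion: lock it into host memory
          * here and unlock it again once the ioctl has completed.
          */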
5458     for (i = 0; i < se->nb_fields; i++) {
5459         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5460             assert(*field_types == TYPE_PTRVOID);
5461             target_rt_dev_ptr = argptr + src_offsets[i];
5462             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5463             if (*target_rt_dev_ptr != 0) {
5464                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5465                                                   tswapal(*target_rt_dev_ptr));
5466                 if (!*host_rt_dev_ptr) {
5467                     unlock_user(argptr, arg, 0);
5468                     return -TARGET_EFAULT;
5469                 }
5470             } else {
5471                 *host_rt_dev_ptr = 0;
5472             }
5473             field_types++;
5474             continue;
5475         }
5476         field_types = thunk_convert(buf_temp + dst_offsets[i],
5477                                     argptr + src_offsets[i],
5478                                     field_types, THUNK_HOST);
5479     }
5480     unlock_user(argptr, arg, 0);
5481 
5482     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5483 
5484     assert(host_rt_dev_ptr != NULL);
5485     assert(target_rt_dev_ptr != NULL);
5486     if (*host_rt_dev_ptr != 0) {
5487         unlock_user((void *)*host_rt_dev_ptr,
5488                     *target_rt_dev_ptr, 0);
5489     }
5490     return ret;
5491 }
5492 
5493 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5494                                      int fd, int cmd, abi_long arg)
5495 {
5496     int sig = target_to_host_signal(arg);
5497     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5498 }
5499 
5500 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5501                                     int fd, int cmd, abi_long arg)
5502 {
5503     struct timeval tv;
5504     abi_long ret;
5505 
5506     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5507     if (is_error(ret)) {
5508         return ret;
5509     }
5510 
5511     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5512         if (copy_to_user_timeval(arg, &tv)) {
5513             return -TARGET_EFAULT;
5514         }
5515     } else {
5516         if (copy_to_user_timeval64(arg, &tv)) {
5517             return -TARGET_EFAULT;
5518         }
5519     }
5520 
5521     return ret;
5522 }
5523 
5524 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5525                                       int fd, int cmd, abi_long arg)
5526 {
5527     struct timespec ts;
5528     abi_long ret;
5529 
5530     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5531     if (is_error(ret)) {
5532         return ret;
5533     }
5534 
5535     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5536         if (host_to_target_timespec(arg, &ts)) {
5537             return -TARGET_EFAULT;
5538         }
5539     } else {
5540         if (host_to_target_timespec64(arg, &ts)) {
5541             return -TARGET_EFAULT;
5542         }
5543     }
5544 
5545     return ret;
5546 }
5547 
5548 #ifdef TIOCGPTPEER
5549 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5550                                      int fd, int cmd, abi_long arg)
5551 {
5552     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5553     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5554 }
5555 #endif
5556 
5557 #ifdef HAVE_DRM_H
5558 
5559 static void unlock_drm_version(struct drm_version *host_ver,
5560                                struct target_drm_version *target_ver,
5561                                bool copy)
5562 {
5563     unlock_user(host_ver->name, target_ver->name,
5564                                 copy ? host_ver->name_len : 0);
5565     unlock_user(host_ver->date, target_ver->date,
5566                                 copy ? host_ver->date_len : 0);
5567     unlock_user(host_ver->desc, target_ver->desc,
5568                                 copy ? host_ver->desc_len : 0);
5569 }
5570 
5571 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5572                                           struct target_drm_version *target_ver)
5573 {
5574     memset(host_ver, 0, sizeof(*host_ver));
5575 
5576     __get_user(host_ver->name_len, &target_ver->name_len);
5577     if (host_ver->name_len) {
5578         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5579                                    target_ver->name_len, 0);
5580         if (!host_ver->name) {
5581             return -EFAULT;
5582         }
5583     }
5584 
5585     __get_user(host_ver->date_len, &target_ver->date_len);
5586     if (host_ver->date_len) {
5587         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5588                                    target_ver->date_len, 0);
5589         if (!host_ver->date) {
5590             goto err;
5591         }
5592     }
5593 
5594     __get_user(host_ver->desc_len, &target_ver->desc_len);
5595     if (host_ver->desc_len) {
5596         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5597                                    target_ver->desc_len, 0);
5598         if (!host_ver->desc) {
5599             goto err;
5600         }
5601     }
5602 
5603     return 0;
5604 err:
5605     unlock_drm_version(host_ver, target_ver, false);
5606     return -EFAULT;
5607 }
5608 
5609 static inline void host_to_target_drmversion(
5610                                           struct target_drm_version *target_ver,
5611                                           struct drm_version *host_ver)
5612 {
5613     __put_user(host_ver->version_major, &target_ver->version_major);
5614     __put_user(host_ver->version_minor, &target_ver->version_minor);
5615     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5616     __put_user(host_ver->name_len, &target_ver->name_len);
5617     __put_user(host_ver->date_len, &target_ver->date_len);
5618     __put_user(host_ver->desc_len, &target_ver->desc_len);
5619     unlock_drm_version(host_ver, target_ver, true);
5620 }
5621 
5622 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5623                              int fd, int cmd, abi_long arg)
5624 {
5625     struct drm_version *ver;
5626     struct target_drm_version *target_ver;
5627     abi_long ret;
5628 
5629     switch (ie->host_cmd) {
5630     case DRM_IOCTL_VERSION:
5631         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5632             return -TARGET_EFAULT;
5633         }
5634         ver = (struct drm_version *)buf_temp;
5635         ret = target_to_host_drmversion(ver, target_ver);
5636         if (!is_error(ret)) {
5637             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5638             if (is_error(ret)) {
5639                 unlock_drm_version(ver, target_ver, false);
5640             } else {
5641                 host_to_target_drmversion(target_ver, ver);
5642             }
5643         }
5644         unlock_user_struct(target_ver, arg, 0);
5645         return ret;
5646     }
5647     return -TARGET_ENOSYS;
5648 }
5649 
5650 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5651                                            struct drm_i915_getparam *gparam,
5652                                            int fd, abi_long arg)
5653 {
5654     abi_long ret;
5655     int value;
5656     struct target_drm_i915_getparam *target_gparam;
5657 
5658     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5659         return -TARGET_EFAULT;
5660     }
5661 
5662     __get_user(gparam->param, &target_gparam->param);
5663     gparam->value = &value;
5664     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5665     put_user_s32(value, target_gparam->value);
5666 
5667     unlock_user_struct(target_gparam, arg, 0);
5668     return ret;
5669 }
5670 
5671 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5672                                   int fd, int cmd, abi_long arg)
5673 {
5674     switch (ie->host_cmd) {
5675     case DRM_IOCTL_I915_GETPARAM:
5676         return do_ioctl_drm_i915_getparam(ie,
5677                                           (struct drm_i915_getparam *)buf_temp,
5678                                           fd, arg);
5679     default:
5680         return -TARGET_ENOSYS;
5681     }
5682 }
5683 
5684 #endif
5685 
5686 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5687                                         int fd, int cmd, abi_long arg)
5688 {
5689     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5690     struct tun_filter *target_filter;
5691     char *target_addr;
5692 
5693     assert(ie->access == IOC_W);
5694 
5695     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5696     if (!target_filter) {
5697         return -TARGET_EFAULT;
5698     }
5699     filter->flags = tswap16(target_filter->flags);
5700     filter->count = tswap16(target_filter->count);
5701     unlock_user(target_filter, arg, 0);
5702 
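         /*
          * struct tun_filter is followed by filter->count MAC addresses of
          * ETH_ALEN bytes each; copy that variable-length tail separately,
          * after checking that it still fits into buf_temp.
          */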
5703     if (filter->count) {
5704         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5705             MAX_STRUCT_SIZE) {
5706             return -TARGET_EFAULT;
5707         }
5708 
5709         target_addr = lock_user(VERIFY_READ,
5710                                 arg + offsetof(struct tun_filter, addr),
5711                                 filter->count * ETH_ALEN, 1);
5712         if (!target_addr) {
5713             return -TARGET_EFAULT;
5714         }
5715         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5716         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5717     }
5718 
5719     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5720 }
5721 
5722 IOCTLEntry ioctl_entries[] = {
5723 #define IOCTL(cmd, access, ...) \
5724     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5725 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5726     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5727 #define IOCTL_IGNORE(cmd) \
5728     { TARGET_ ## cmd, 0, #cmd },
5729 #include "ioctls.h"
5730     { 0, 0, },
5731 };
5732 
5733 /* ??? Implement proper locking for ioctls.  */
5734 /* do_ioctl() must return target values and target errnos. */
5735 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5736 {
5737     const IOCTLEntry *ie;
5738     const argtype *arg_type;
5739     abi_long ret;
5740     uint8_t buf_temp[MAX_STRUCT_SIZE];
5741     int target_size;
5742     void *argptr;
5743 
5744     ie = ioctl_entries;
5745     for (;;) {
5746         if (ie->target_cmd == 0) {
5747             qemu_log_mask(
5748                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5749             return -TARGET_ENOSYS;
5750         }
5751         if (ie->target_cmd == cmd)
5752             break;
5753         ie++;
5754     }
5755     arg_type = ie->arg_type;
5756     if (ie->do_ioctl) {
5757         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5758     } else if (!ie->host_cmd) {
5759         /* Some architectures define BSD ioctls in their headers
5760            that are not implemented in Linux.  */
5761         return -TARGET_ENOSYS;
5762     }
5763 
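         /*
          * Generic conversion path: IOC_W arguments are thunk-converted from
          * target to host before the ioctl, IOC_R arguments from host to
          * target afterwards, and IOC_RW arguments in both directions.
          */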
5764     switch (arg_type[0]) {
5765     case TYPE_NULL:
5766         /* no argument */
5767         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5768         break;
5769     case TYPE_PTRVOID:
5770     case TYPE_INT:
5771     case TYPE_LONG:
5772     case TYPE_ULONG:
5773         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5774         break;
5775     case TYPE_PTR:
5776         arg_type++;
5777         target_size = thunk_type_size(arg_type, 0);
5778         switch (ie->access) {
5779         case IOC_R:
5780             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5781             if (!is_error(ret)) {
5782                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5783                 if (!argptr)
5784                     return -TARGET_EFAULT;
5785                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5786                 unlock_user(argptr, arg, target_size);
5787             }
5788             break;
5789         case IOC_W:
5790             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5791             if (!argptr)
5792                 return -TARGET_EFAULT;
5793             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5794             unlock_user(argptr, arg, 0);
5795             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5796             break;
5797         default:
5798         case IOC_RW:
5799             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5800             if (!argptr)
5801                 return -TARGET_EFAULT;
5802             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5803             unlock_user(argptr, arg, 0);
5804             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5805             if (!is_error(ret)) {
5806                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5807                 if (!argptr)
5808                     return -TARGET_EFAULT;
5809                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5810                 unlock_user(argptr, arg, target_size);
5811             }
5812             break;
5813         }
5814         break;
5815     default:
5816         qemu_log_mask(LOG_UNIMP,
5817                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5818                       (long)cmd, arg_type[0]);
5819         ret = -TARGET_ENOSYS;
5820         break;
5821     }
5822     return ret;
5823 }
5824 
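     /*
      * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
      * host_bits }: whenever the masked target field matches target_bits, the
      * corresponding host_bits are set in the host value (and vice versa for
      * the host-to-target direction).
      */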
5825 static const bitmask_transtbl iflag_tbl[] = {
5826         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5827         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5828         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5829         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5830         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5831         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5832         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5833         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5834         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5835         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5836         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5837         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5838         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5839         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5840         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5841         { 0, 0, 0, 0 }
5842 };
5843 
5844 static const bitmask_transtbl oflag_tbl[] = {
5845 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5846 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5847 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5848 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5849 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5850 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5851 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5852 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5853 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5854 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5855 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5856 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5857 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5858 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5859 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5860 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5861 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5862 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5863 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5864 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5865 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5866 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5867 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5868 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5869 	{ 0, 0, 0, 0 }
5870 };
5871 
5872 static const bitmask_transtbl cflag_tbl[] = {
5873 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5874 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5875 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5876 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5877 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5878 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5879 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5880 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5881 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5882 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5883 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5884 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5885 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5886 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5887 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5888 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5889 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5890 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5891 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5892 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5893 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5894 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5895 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5896 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5897 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5898 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5899 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5900 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5901 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5902 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5903 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5904 	{ 0, 0, 0, 0 }
5905 };
5906 
5907 static const bitmask_transtbl lflag_tbl[] = {
5908   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5909   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5910   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5911   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5912   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5913   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5914   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5915   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5916   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5917   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5918   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5919   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5920   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5921   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5922   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5923   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5924   { 0, 0, 0, 0 }
5925 };
5926 
5927 static void target_to_host_termios (void *dst, const void *src)
5928 {
5929     struct host_termios *host = dst;
5930     const struct target_termios *target = src;
5931 
5932     host->c_iflag =
5933         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5934     host->c_oflag =
5935         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5936     host->c_cflag =
5937         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5938     host->c_lflag =
5939         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5940     host->c_line = target->c_line;
5941 
5942     memset(host->c_cc, 0, sizeof(host->c_cc));
5943     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5944     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5945     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5946     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5947     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5948     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5949     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5950     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5951     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5952     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5953     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5954     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5955     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5956     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5957     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5958     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5959     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5960 }
5961 
5962 static void host_to_target_termios (void *dst, const void *src)
5963 {
5964     struct target_termios *target = dst;
5965     const struct host_termios *host = src;
5966 
5967     target->c_iflag =
5968         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5969     target->c_oflag =
5970         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5971     target->c_cflag =
5972         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5973     target->c_lflag =
5974         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5975     target->c_line = host->c_line;
5976 
5977     memset(target->c_cc, 0, sizeof(target->c_cc));
5978     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5979     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5980     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5981     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5982     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5983     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5984     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5985     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5986     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5987     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5988     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5989     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5990     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5991     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5992     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5993     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5994     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5995 }
5996 
5997 static const StructEntry struct_termios_def = {
5998     .convert = { host_to_target_termios, target_to_host_termios },
5999     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
6000     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
6001     .print = print_termios,
6002 };
6003 
6004 static const bitmask_transtbl mmap_flags_tbl[] = {
6005     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
6006     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
6007     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
6008     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
6009       MAP_ANONYMOUS, MAP_ANONYMOUS },
6010     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6011       MAP_GROWSDOWN, MAP_GROWSDOWN },
6012     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6013       MAP_DENYWRITE, MAP_DENYWRITE },
6014     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6015       MAP_EXECUTABLE, MAP_EXECUTABLE },
6016     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6017     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6018       MAP_NORESERVE, MAP_NORESERVE },
6019     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6020     /* MAP_STACK had been ignored by the kernel for quite some time.
6021        Recognize it for the target insofar as we do not want to pass
6022        it through to the host.  */
6023     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6024     { 0, 0, 0, 0 }
6025 };
6026 
6027 /*
6028  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6029  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
6030  */
6031 #if defined(TARGET_I386)
6032 
6033 /* NOTE: there is really one LDT for all the threads */
6034 static uint8_t *ldt_table;
6035 
6036 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6037 {
6038     int size;
6039     void *p;
6040 
6041     if (!ldt_table)
6042         return 0;
6043     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6044     if (size > bytecount)
6045         size = bytecount;
6046     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6047     if (!p)
6048         return -TARGET_EFAULT;
6049     /* ??? Should this be byteswapped?  */
6050     memcpy(p, ldt_table, size);
6051     unlock_user(p, ptr, size);
6052     return size;
6053 }
6054 
6055 /* XXX: add locking support */
6056 static abi_long write_ldt(CPUX86State *env,
6057                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6058 {
6059     struct target_modify_ldt_ldt_s ldt_info;
6060     struct target_modify_ldt_ldt_s *target_ldt_info;
6061     int seg_32bit, contents, read_exec_only, limit_in_pages;
6062     int seg_not_present, useable, lm;
6063     uint32_t *lp, entry_1, entry_2;
6064 
6065     if (bytecount != sizeof(ldt_info))
6066         return -TARGET_EINVAL;
6067     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6068         return -TARGET_EFAULT;
6069     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6070     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6071     ldt_info.limit = tswap32(target_ldt_info->limit);
6072     ldt_info.flags = tswap32(target_ldt_info->flags);
6073     unlock_user_struct(target_ldt_info, ptr, 0);
6074 
6075     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6076         return -TARGET_EINVAL;
6077     seg_32bit = ldt_info.flags & 1;
6078     contents = (ldt_info.flags >> 1) & 3;
6079     read_exec_only = (ldt_info.flags >> 3) & 1;
6080     limit_in_pages = (ldt_info.flags >> 4) & 1;
6081     seg_not_present = (ldt_info.flags >> 5) & 1;
6082     useable = (ldt_info.flags >> 6) & 1;
6083 #ifdef TARGET_ABI32
6084     lm = 0;
6085 #else
6086     lm = (ldt_info.flags >> 7) & 1;
6087 #endif
6088     if (contents == 3) {
6089         if (oldmode)
6090             return -TARGET_EINVAL;
6091         if (seg_not_present == 0)
6092             return -TARGET_EINVAL;
6093     }
6094     /* allocate the LDT */
6095     if (!ldt_table) {
6096         env->ldt.base = target_mmap(0,
6097                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6098                                     PROT_READ|PROT_WRITE,
6099                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6100         if (env->ldt.base == -1)
6101             return -TARGET_ENOMEM;
6102         memset(g2h_untagged(env->ldt.base), 0,
6103                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6104         env->ldt.limit = 0xffff;
6105         ldt_table = g2h_untagged(env->ldt.base);
6106     }
6107 
6108     /* NOTE: same code as Linux kernel */
6109     /* Allow LDTs to be cleared by the user. */
6110     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6111         if (oldmode ||
6112             (contents == 0		&&
6113              read_exec_only == 1	&&
6114              seg_32bit == 0		&&
6115              limit_in_pages == 0	&&
6116              seg_not_present == 1	&&
6117              useable == 0 )) {
6118             entry_1 = 0;
6119             entry_2 = 0;
6120             goto install;
6121         }
6122     }
6123 
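         /*
          * Pack base, limit and attribute bits into the two 32-bit words of
          * an x86 segment descriptor, using the same layout as the kernel's
          * modify_ldt() implementation.
          */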
6124     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6125         (ldt_info.limit & 0x0ffff);
6126     entry_2 = (ldt_info.base_addr & 0xff000000) |
6127         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6128         (ldt_info.limit & 0xf0000) |
6129         ((read_exec_only ^ 1) << 9) |
6130         (contents << 10) |
6131         ((seg_not_present ^ 1) << 15) |
6132         (seg_32bit << 22) |
6133         (limit_in_pages << 23) |
6134         (lm << 21) |
6135         0x7000;
6136     if (!oldmode)
6137         entry_2 |= (useable << 20);
6138 
6139     /* Install the new entry ...  */
6140 install:
6141     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6142     lp[0] = tswap32(entry_1);
6143     lp[1] = tswap32(entry_2);
6144     return 0;
6145 }
6146 
6147 /* specific and weird i386 syscalls */
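/*
 * modify_ldt(2) multiplexes on 'func': 0 reads the LDT, 1 writes an entry
 * in the legacy format and 0x11 writes in the current format; the two write
 * modes differ only in write_ldt()'s 'oldmode' handling above.
 */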
6148 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6149                               unsigned long bytecount)
6150 {
6151     abi_long ret;
6152 
6153     switch (func) {
6154     case 0:
6155         ret = read_ldt(ptr, bytecount);
6156         break;
6157     case 1:
6158         ret = write_ldt(env, ptr, bytecount, 1);
6159         break;
6160     case 0x11:
6161         ret = write_ldt(env, ptr, bytecount, 0);
6162         break;
6163     default:
6164         ret = -TARGET_ENOSYS;
6165         break;
6166     }
6167     return ret;
6168 }
6169 
6170 #if defined(TARGET_ABI32)
6171 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6172 {
6173     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6174     struct target_modify_ldt_ldt_s ldt_info;
6175     struct target_modify_ldt_ldt_s *target_ldt_info;
6176     int seg_32bit, contents, read_exec_only, limit_in_pages;
6177     int seg_not_present, useable, lm;
6178     uint32_t *lp, entry_1, entry_2;
6179     int i;
6180 
6181     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6182     if (!target_ldt_info)
6183         return -TARGET_EFAULT;
6184     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6185     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6186     ldt_info.limit = tswap32(target_ldt_info->limit);
6187     ldt_info.flags = tswap32(target_ldt_info->flags);
6188     if (ldt_info.entry_number == -1) {
6189         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6190             if (gdt_table[i] == 0) {
6191                 ldt_info.entry_number = i;
6192                 target_ldt_info->entry_number = tswap32(i);
6193                 break;
6194             }
6195         }
6196     }
6197     unlock_user_struct(target_ldt_info, ptr, 1);
6198 
6199     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6200         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6201            return -TARGET_EINVAL;
6202     seg_32bit = ldt_info.flags & 1;
6203     contents = (ldt_info.flags >> 1) & 3;
6204     read_exec_only = (ldt_info.flags >> 3) & 1;
6205     limit_in_pages = (ldt_info.flags >> 4) & 1;
6206     seg_not_present = (ldt_info.flags >> 5) & 1;
6207     useable = (ldt_info.flags >> 6) & 1;
6208 #ifdef TARGET_ABI32
6209     lm = 0;
6210 #else
6211     lm = (ldt_info.flags >> 7) & 1;
6212 #endif
6213 
6214     if (contents == 3) {
6215         if (seg_not_present == 0)
6216             return -TARGET_EINVAL;
6217     }
6218 
6219     /* NOTE: same code as Linux kernel */
6220     /* Allow LDTs to be cleared by the user. */
6221     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6222         if ((contents == 0             &&
6223              read_exec_only == 1       &&
6224              seg_32bit == 0            &&
6225              limit_in_pages == 0       &&
6226              seg_not_present == 1      &&
6227              useable == 0 )) {
6228             entry_1 = 0;
6229             entry_2 = 0;
6230             goto install;
6231         }
6232     }
6233 
6234     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6235         (ldt_info.limit & 0x0ffff);
6236     entry_2 = (ldt_info.base_addr & 0xff000000) |
6237         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6238         (ldt_info.limit & 0xf0000) |
6239         ((read_exec_only ^ 1) << 9) |
6240         (contents << 10) |
6241         ((seg_not_present ^ 1) << 15) |
6242         (seg_32bit << 22) |
6243         (limit_in_pages << 23) |
6244         (useable << 20) |
6245         (lm << 21) |
6246         0x7000;
6247 
6248     /* Install the new entry ...  */
6249 install:
6250     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6251     lp[0] = tswap32(entry_1);
6252     lp[1] = tswap32(entry_2);
6253     return 0;
6254 }
6255 
6256 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6257 {
6258     struct target_modify_ldt_ldt_s *target_ldt_info;
6259     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6260     uint32_t base_addr, limit, flags;
6261     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6262     int seg_not_present, useable, lm;
6263     uint32_t *lp, entry_1, entry_2;
6264 
6265     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6266     if (!target_ldt_info)
6267         return -TARGET_EFAULT;
6268     idx = tswap32(target_ldt_info->entry_number);
6269     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6270         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6271         unlock_user_struct(target_ldt_info, ptr, 1);
6272         return -TARGET_EINVAL;
6273     }
6274     lp = (uint32_t *)(gdt_table + idx);
6275     entry_1 = tswap32(lp[0]);
6276     entry_2 = tswap32(lp[1]);
6277 
6278     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6279     contents = (entry_2 >> 10) & 3;
6280     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6281     seg_32bit = (entry_2 >> 22) & 1;
6282     limit_in_pages = (entry_2 >> 23) & 1;
6283     useable = (entry_2 >> 20) & 1;
6284 #ifdef TARGET_ABI32
6285     lm = 0;
6286 #else
6287     lm = (entry_2 >> 21) & 1;
6288 #endif
6289     flags = (seg_32bit << 0) | (contents << 1) |
6290         (read_exec_only << 3) | (limit_in_pages << 4) |
6291         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6292     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6293     base_addr = (entry_1 >> 16) |
6294         (entry_2 & 0xff000000) |
6295         ((entry_2 & 0xff) << 16);
6296     target_ldt_info->base_addr = tswapal(base_addr);
6297     target_ldt_info->limit = tswap32(limit);
6298     target_ldt_info->flags = tswap32(flags);
6299     unlock_user_struct(target_ldt_info, ptr, 1);
6300     return 0;
6301 }
6302 
6303 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6304 {
6305     return -TARGET_ENOSYS;
6306 }
6307 #else
6308 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6309 {
6310     abi_long ret = 0;
6311     abi_ulong val;
6312     int idx;
6313 
6314     switch(code) {
6315     case TARGET_ARCH_SET_GS:
6316     case TARGET_ARCH_SET_FS:
6317         if (code == TARGET_ARCH_SET_GS)
6318             idx = R_GS;
6319         else
6320             idx = R_FS;
6321         cpu_x86_load_seg(env, idx, 0);
6322         env->segs[idx].base = addr;
6323         break;
6324     case TARGET_ARCH_GET_GS:
6325     case TARGET_ARCH_GET_FS:
6326         if (code == TARGET_ARCH_GET_GS)
6327             idx = R_GS;
6328         else
6329             idx = R_FS;
6330         val = env->segs[idx].base;
6331         if (put_user(val, addr, abi_ulong))
6332             ret = -TARGET_EFAULT;
6333         break;
6334     default:
6335         ret = -TARGET_EINVAL;
6336         break;
6337     }
6338     return ret;
6339 }
6340 #endif /* defined(TARGET_ABI32) */
6341 #endif /* defined(TARGET_I386) */
6342 
6343 /*
6344  * These constants are generic.  Supply any that are missing from the host.
6345  */
6346 #ifndef PR_SET_NAME
6347 # define PR_SET_NAME    15
6348 # define PR_GET_NAME    16
6349 #endif
6350 #ifndef PR_SET_FP_MODE
6351 # define PR_SET_FP_MODE 45
6352 # define PR_GET_FP_MODE 46
6353 # define PR_FP_MODE_FR   (1 << 0)
6354 # define PR_FP_MODE_FRE  (1 << 1)
6355 #endif
6356 #ifndef PR_SVE_SET_VL
6357 # define PR_SVE_SET_VL  50
6358 # define PR_SVE_GET_VL  51
6359 # define PR_SVE_VL_LEN_MASK  0xffff
6360 # define PR_SVE_VL_INHERIT   (1 << 17)
6361 #endif
6362 #ifndef PR_PAC_RESET_KEYS
6363 # define PR_PAC_RESET_KEYS  54
6364 # define PR_PAC_APIAKEY   (1 << 0)
6365 # define PR_PAC_APIBKEY   (1 << 1)
6366 # define PR_PAC_APDAKEY   (1 << 2)
6367 # define PR_PAC_APDBKEY   (1 << 3)
6368 # define PR_PAC_APGAKEY   (1 << 4)
6369 #endif
6370 #ifndef PR_SET_TAGGED_ADDR_CTRL
6371 # define PR_SET_TAGGED_ADDR_CTRL 55
6372 # define PR_GET_TAGGED_ADDR_CTRL 56
6373 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6374 #endif
6375 #ifndef PR_MTE_TCF_SHIFT
6376 # define PR_MTE_TCF_SHIFT       1
6377 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6378 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6379 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6380 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6381 # define PR_MTE_TAG_SHIFT       3
6382 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6383 #endif
6384 #ifndef PR_SET_IO_FLUSHER
6385 # define PR_SET_IO_FLUSHER 57
6386 # define PR_GET_IO_FLUSHER 58
6387 #endif
6388 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6389 # define PR_SET_SYSCALL_USER_DISPATCH 59
6390 #endif
6391 #ifndef PR_SME_SET_VL
6392 # define PR_SME_SET_VL  63
6393 # define PR_SME_GET_VL  64
6394 # define PR_SME_VL_LEN_MASK  0xffff
6395 # define PR_SME_VL_INHERIT   (1 << 17)
6396 #endif
6397 
6398 #include "target_prctl.h"
6399 
6400 static abi_long do_prctl_inval0(CPUArchState *env)
6401 {
6402     return -TARGET_EINVAL;
6403 }
6404 
6405 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6406 {
6407     return -TARGET_EINVAL;
6408 }
6409 
6410 #ifndef do_prctl_get_fp_mode
6411 #define do_prctl_get_fp_mode do_prctl_inval0
6412 #endif
6413 #ifndef do_prctl_set_fp_mode
6414 #define do_prctl_set_fp_mode do_prctl_inval1
6415 #endif
6416 #ifndef do_prctl_sve_get_vl
6417 #define do_prctl_sve_get_vl do_prctl_inval0
6418 #endif
6419 #ifndef do_prctl_sve_set_vl
6420 #define do_prctl_sve_set_vl do_prctl_inval1
6421 #endif
6422 #ifndef do_prctl_reset_keys
6423 #define do_prctl_reset_keys do_prctl_inval1
6424 #endif
6425 #ifndef do_prctl_set_tagged_addr_ctrl
6426 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6427 #endif
6428 #ifndef do_prctl_get_tagged_addr_ctrl
6429 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6430 #endif
6431 #ifndef do_prctl_get_unalign
6432 #define do_prctl_get_unalign do_prctl_inval1
6433 #endif
6434 #ifndef do_prctl_set_unalign
6435 #define do_prctl_set_unalign do_prctl_inval1
6436 #endif
6437 #ifndef do_prctl_sme_get_vl
6438 #define do_prctl_sme_get_vl do_prctl_inval0
6439 #endif
6440 #ifndef do_prctl_sme_set_vl
6441 #define do_prctl_sme_set_vl do_prctl_inval1
6442 #endif
6443 
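/*
 * Front end for the guest's prctl().  Options that need guest-specific
 * handling go through the do_prctl_* hooks above, which default to the
 * EINVAL stubs unless the per-target "target_prctl.h" supplies a real
 * implementation; everything else is either forwarded to the host prctl()
 * or rejected.
 */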
6444 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6445                          abi_long arg3, abi_long arg4, abi_long arg5)
6446 {
6447     abi_long ret;
6448 
6449     switch (option) {
6450     case PR_GET_PDEATHSIG:
6451         {
6452             int deathsig;
6453             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6454                                   arg3, arg4, arg5));
6455             if (!is_error(ret) &&
6456                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6457                 return -TARGET_EFAULT;
6458             }
6459             return ret;
6460         }
6461     case PR_SET_PDEATHSIG:
6462         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6463                                arg3, arg4, arg5));
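    /*
     * PR_GET_NAME and PR_SET_NAME operate on a fixed 16-byte buffer,
     * matching the kernel's TASK_COMM_LEN (15 characters plus NUL).
     */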
6464     case PR_GET_NAME:
6465         {
6466             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6467             if (!name) {
6468                 return -TARGET_EFAULT;
6469             }
6470             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6471                                   arg3, arg4, arg5));
6472             unlock_user(name, arg2, 16);
6473             return ret;
6474         }
6475     case PR_SET_NAME:
6476         {
6477             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6478             if (!name) {
6479                 return -TARGET_EFAULT;
6480             }
6481             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6482                                   arg3, arg4, arg5));
6483             unlock_user(name, arg2, 0);
6484             return ret;
6485         }
6486     case PR_GET_FP_MODE:
6487         return do_prctl_get_fp_mode(env);
6488     case PR_SET_FP_MODE:
6489         return do_prctl_set_fp_mode(env, arg2);
6490     case PR_SVE_GET_VL:
6491         return do_prctl_sve_get_vl(env);
6492     case PR_SVE_SET_VL:
6493         return do_prctl_sve_set_vl(env, arg2);
6494     case PR_SME_GET_VL:
6495         return do_prctl_sme_get_vl(env);
6496     case PR_SME_SET_VL:
6497         return do_prctl_sme_set_vl(env, arg2);
6498     case PR_PAC_RESET_KEYS:
6499         if (arg3 || arg4 || arg5) {
6500             return -TARGET_EINVAL;
6501         }
6502         return do_prctl_reset_keys(env, arg2);
6503     case PR_SET_TAGGED_ADDR_CTRL:
6504         if (arg3 || arg4 || arg5) {
6505             return -TARGET_EINVAL;
6506         }
6507         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6508     case PR_GET_TAGGED_ADDR_CTRL:
6509         if (arg2 || arg3 || arg4 || arg5) {
6510             return -TARGET_EINVAL;
6511         }
6512         return do_prctl_get_tagged_addr_ctrl(env);
6513 
6514     case PR_GET_UNALIGN:
6515         return do_prctl_get_unalign(env, arg2);
6516     case PR_SET_UNALIGN:
6517         return do_prctl_set_unalign(env, arg2);
6518 
6519     case PR_CAP_AMBIENT:
6520     case PR_CAPBSET_READ:
6521     case PR_CAPBSET_DROP:
6522     case PR_GET_DUMPABLE:
6523     case PR_SET_DUMPABLE:
6524     case PR_GET_KEEPCAPS:
6525     case PR_SET_KEEPCAPS:
6526     case PR_GET_SECUREBITS:
6527     case PR_SET_SECUREBITS:
6528     case PR_GET_TIMING:
6529     case PR_SET_TIMING:
6530     case PR_GET_TIMERSLACK:
6531     case PR_SET_TIMERSLACK:
6532     case PR_MCE_KILL:
6533     case PR_MCE_KILL_GET:
6534     case PR_GET_NO_NEW_PRIVS:
6535     case PR_SET_NO_NEW_PRIVS:
6536     case PR_GET_IO_FLUSHER:
6537     case PR_SET_IO_FLUSHER:
6538         /* These options have no pointer arguments and pass through unchanged. */
6539         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6540 
6541     case PR_GET_CHILD_SUBREAPER:
6542     case PR_SET_CHILD_SUBREAPER:
6543     case PR_GET_SPECULATION_CTRL:
6544     case PR_SET_SPECULATION_CTRL:
6545     case PR_GET_TID_ADDRESS:
6546         /* TODO */
6547         return -TARGET_EINVAL;
6548 
6549     case PR_GET_FPEXC:
6550     case PR_SET_FPEXC:
6551         /* Was used for SPE on PowerPC. */
6552         return -TARGET_EINVAL;
6553 
6554     case PR_GET_ENDIAN:
6555     case PR_SET_ENDIAN:
6556     case PR_GET_FPEMU:
6557     case PR_SET_FPEMU:
6558     case PR_SET_MM:
6559     case PR_GET_SECCOMP:
6560     case PR_SET_SECCOMP:
6561     case PR_SET_SYSCALL_USER_DISPATCH:
6562     case PR_GET_THP_DISABLE:
6563     case PR_SET_THP_DISABLE:
6564     case PR_GET_TSC:
6565     case PR_SET_TSC:
6566         /* Refuse these so the guest can't disable features QEMU needs. */
6567         return -TARGET_EINVAL;
6568 
6569     default:
6570         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6571                       option);
6572         return -TARGET_EINVAL;
6573     }
6574 }
6575 
6576 #define NEW_STACK_SIZE 0x40000
6577 
6578 
6579 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6580 typedef struct {
6581     CPUArchState *env;
6582     pthread_mutex_t mutex;
6583     pthread_cond_t cond;
6584     pthread_t thread;
6585     uint32_t tid;
6586     abi_ulong child_tidptr;
6587     abi_ulong parent_tidptr;
6588     sigset_t sigmask;
6589 } new_thread_info;
6590 
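/*
 * Start routine for threads created via do_fork() with CLONE_VM.  The
 * parent holds clone_lock while it finishes setting up the new CPU and TLS
 * state; the child publishes its TID, signals readiness on info->cond and
 * then waits for clone_lock to be released before entering cpu_loop().
 */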
6591 static void *clone_func(void *arg)
6592 {
6593     new_thread_info *info = arg;
6594     CPUArchState *env;
6595     CPUState *cpu;
6596     TaskState *ts;
6597 
6598     rcu_register_thread();
6599     tcg_register_thread();
6600     env = info->env;
6601     cpu = env_cpu(env);
6602     thread_cpu = cpu;
6603     ts = (TaskState *)cpu->opaque;
6604     info->tid = sys_gettid();
6605     task_settid(ts);
6606     if (info->child_tidptr)
6607         put_user_u32(info->tid, info->child_tidptr);
6608     if (info->parent_tidptr)
6609         put_user_u32(info->tid, info->parent_tidptr);
6610     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6611     /* Enable signals.  */
6612     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6613     /* Signal to the parent that we're ready.  */
6614     pthread_mutex_lock(&info->mutex);
6615     pthread_cond_broadcast(&info->cond);
6616     pthread_mutex_unlock(&info->mutex);
6617     /* Wait until the parent has finished initializing the tls state.  */
6618     pthread_mutex_lock(&clone_lock);
6619     pthread_mutex_unlock(&clone_lock);
6620     cpu_loop(env);
6621     /* never exits */
6622     return NULL;
6623 }
6624 
6625 /* do_fork() must return host values and target errnos (unlike most
6626    do_*() functions). */
6627 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6628                    abi_ulong parent_tidptr, target_ulong newtls,
6629                    abi_ulong child_tidptr)
6630 {
6631     CPUState *cpu = env_cpu(env);
6632     int ret;
6633     TaskState *ts;
6634     CPUState *new_cpu;
6635     CPUArchState *new_env;
6636     sigset_t sigmask;
6637 
6638     flags &= ~CLONE_IGNORED_FLAGS;
6639 
6640     /* Emulate vfork() with fork() */
6641     if (flags & CLONE_VFORK)
6642         flags &= ~(CLONE_VFORK | CLONE_VM);
6643 
6644     if (flags & CLONE_VM) {
6645         TaskState *parent_ts = (TaskState *)cpu->opaque;
6646         new_thread_info info;
6647         pthread_attr_t attr;
6648 
6649         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6650             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6651             return -TARGET_EINVAL;
6652         }
6653 
6654         ts = g_new0(TaskState, 1);
6655         init_task_state(ts);
6656 
6657         /* Grab a mutex so that thread setup appears atomic.  */
6658         pthread_mutex_lock(&clone_lock);
6659 
6660         /*
6661          * If this is our first additional thread, we need to ensure we
6662          * generate code for parallel execution and flush old translations.
6663          * Do this now so that the copy gets CF_PARALLEL too.
6664          */
6665         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6666             cpu->tcg_cflags |= CF_PARALLEL;
6667             tb_flush(cpu);
6668         }
6669 
6670         /* we create a new CPU instance. */
6671         new_env = cpu_copy(env);
6672         /* Init regs that differ from the parent.  */
6673         cpu_clone_regs_child(new_env, newsp, flags);
6674         cpu_clone_regs_parent(env, flags);
6675         new_cpu = env_cpu(new_env);
6676         new_cpu->opaque = ts;
6677         ts->bprm = parent_ts->bprm;
6678         ts->info = parent_ts->info;
6679         ts->signal_mask = parent_ts->signal_mask;
6680 
6681         if (flags & CLONE_CHILD_CLEARTID) {
6682             ts->child_tidptr = child_tidptr;
6683         }
6684 
6685         if (flags & CLONE_SETTLS) {
6686             cpu_set_tls (new_env, newtls);
6687         }
6688 
6689         memset(&info, 0, sizeof(info));
6690         pthread_mutex_init(&info.mutex, NULL);
6691         pthread_mutex_lock(&info.mutex);
6692         pthread_cond_init(&info.cond, NULL);
6693         info.env = new_env;
6694         if (flags & CLONE_CHILD_SETTID) {
6695             info.child_tidptr = child_tidptr;
6696         }
6697         if (flags & CLONE_PARENT_SETTID) {
6698             info.parent_tidptr = parent_tidptr;
6699         }
6700 
6701         ret = pthread_attr_init(&attr);
6702         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6703         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6704         /* It is not safe to deliver signals until the child has finished
6705            initializing, so temporarily block all signals.  */
6706         sigfillset(&sigmask);
6707         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6708         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6709 
6710         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6711         /* TODO: Free new CPU state if thread creation failed.  */
6712 
6713         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6714         pthread_attr_destroy(&attr);
6715         if (ret == 0) {
6716             /* Wait for the child to initialize.  */
6717             pthread_cond_wait(&info.cond, &info.mutex);
6718             ret = info.tid;
6719         } else {
6720             ret = -1;
6721         }
6722         pthread_mutex_unlock(&info.mutex);
6723         pthread_cond_destroy(&info.cond);
6724         pthread_mutex_destroy(&info.mutex);
6725         pthread_mutex_unlock(&clone_lock);
6726     } else {
6727         /* If CLONE_VM is not set, we consider this a fork. */
6728         if (flags & CLONE_INVALID_FORK_FLAGS) {
6729             return -TARGET_EINVAL;
6730         }
6731 
6732         /* We can't support custom termination signals */
6733         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6734             return -TARGET_EINVAL;
6735         }
6736 
6737 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6738         if (flags & CLONE_PIDFD) {
6739             return -TARGET_EINVAL;
6740         }
6741 #endif
6742 
6743         /* Cannot allow CLONE_PIDFD together with CLONE_PARENT_SETTID */
6744         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6745             return -TARGET_EINVAL;
6746         }
6747 
6748         if (block_signals()) {
6749             return -QEMU_ERESTARTSYS;
6750         }
6751 
6752         fork_start();
6753         ret = fork();
6754         if (ret == 0) {
6755             /* Child Process.  */
6756             cpu_clone_regs_child(env, newsp, flags);
6757             fork_end(1);
6758             /* There is a race condition here.  The parent process could
6759                theoretically read the TID in the child process before the
6760                child tid is set.  This would require using either ptrace
6761                (not implemented) or having *_tidptr point at a shared memory
6762                mapping.  We can't repeat the spinlock hack used above because
6763                the child process gets its own copy of the lock.  */
6764             if (flags & CLONE_CHILD_SETTID)
6765                 put_user_u32(sys_gettid(), child_tidptr);
6766             if (flags & CLONE_PARENT_SETTID)
6767                 put_user_u32(sys_gettid(), parent_tidptr);
6768             ts = (TaskState *)cpu->opaque;
6769             if (flags & CLONE_SETTLS)
6770                 cpu_set_tls (env, newtls);
6771             if (flags & CLONE_CHILD_CLEARTID)
6772                 ts->child_tidptr = child_tidptr;
6773         } else {
6774             cpu_clone_regs_parent(env, flags);
6775             if (flags & CLONE_PIDFD) {
6776                 int pid_fd = 0;
6777 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6778                 int pid_child = ret;
6779                 pid_fd = pidfd_open(pid_child, 0);
6780                 if (pid_fd >= 0) {
6781                     fcntl(pid_fd, F_SETFD,
6782                           fcntl(pid_fd, F_GETFD) | FD_CLOEXEC);
6783                 } else {
6784                     pid_fd = 0;
6785                 }
6786 #endif
6787                 put_user_u32(pid_fd, parent_tidptr);
6788             }
6789             fork_end(0);
6790         }
6791         g_assert(!cpu_in_exclusive_context(cpu));
6792     }
6793     return ret;
6794 }
6795 
6796 /* warning: does not handle Linux-specific flags... */
6797 static int target_to_host_fcntl_cmd(int cmd)
6798 {
6799     int ret;
6800 
6801     switch(cmd) {
6802     case TARGET_F_DUPFD:
6803     case TARGET_F_GETFD:
6804     case TARGET_F_SETFD:
6805     case TARGET_F_GETFL:
6806     case TARGET_F_SETFL:
6807     case TARGET_F_OFD_GETLK:
6808     case TARGET_F_OFD_SETLK:
6809     case TARGET_F_OFD_SETLKW:
6810         ret = cmd;
6811         break;
6812     case TARGET_F_GETLK:
6813         ret = F_GETLK64;
6814         break;
6815     case TARGET_F_SETLK:
6816         ret = F_SETLK64;
6817         break;
6818     case TARGET_F_SETLKW:
6819         ret = F_SETLKW64;
6820         break;
6821     case TARGET_F_GETOWN:
6822         ret = F_GETOWN;
6823         break;
6824     case TARGET_F_SETOWN:
6825         ret = F_SETOWN;
6826         break;
6827     case TARGET_F_GETSIG:
6828         ret = F_GETSIG;
6829         break;
6830     case TARGET_F_SETSIG:
6831         ret = F_SETSIG;
6832         break;
6833 #if TARGET_ABI_BITS == 32
6834     case TARGET_F_GETLK64:
6835         ret = F_GETLK64;
6836         break;
6837     case TARGET_F_SETLK64:
6838         ret = F_SETLK64;
6839         break;
6840     case TARGET_F_SETLKW64:
6841         ret = F_SETLKW64;
6842         break;
6843 #endif
6844     case TARGET_F_SETLEASE:
6845         ret = F_SETLEASE;
6846         break;
6847     case TARGET_F_GETLEASE:
6848         ret = F_GETLEASE;
6849         break;
6850 #ifdef F_DUPFD_CLOEXEC
6851     case TARGET_F_DUPFD_CLOEXEC:
6852         ret = F_DUPFD_CLOEXEC;
6853         break;
6854 #endif
6855     case TARGET_F_NOTIFY:
6856         ret = F_NOTIFY;
6857         break;
6858 #ifdef F_GETOWN_EX
6859     case TARGET_F_GETOWN_EX:
6860         ret = F_GETOWN_EX;
6861         break;
6862 #endif
6863 #ifdef F_SETOWN_EX
6864     case TARGET_F_SETOWN_EX:
6865         ret = F_SETOWN_EX;
6866         break;
6867 #endif
6868 #ifdef F_SETPIPE_SZ
6869     case TARGET_F_SETPIPE_SZ:
6870         ret = F_SETPIPE_SZ;
6871         break;
6872     case TARGET_F_GETPIPE_SZ:
6873         ret = F_GETPIPE_SZ;
6874         break;
6875 #endif
6876 #ifdef F_ADD_SEALS
6877     case TARGET_F_ADD_SEALS:
6878         ret = F_ADD_SEALS;
6879         break;
6880     case TARGET_F_GET_SEALS:
6881         ret = F_GET_SEALS;
6882         break;
6883 #endif
6884     default:
6885         ret = -TARGET_EINVAL;
6886         break;
6887     }
6888 
6889 #if defined(__powerpc64__)
6890     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, which
6891      * the kernel does not support. The glibc fcntl wrapper adjusts them
6892      * to 5, 6 and 7 before making the syscall(). Since we make the
6893      * syscall directly, adjust to what the kernel supports.
6894      */
6895     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6896         ret -= F_GETLK64 - 5;
6897     }
6898 #endif
6899 
6900     return ret;
6901 }
6902 
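/*
 * FLOCK_TRANSTBL is expanded twice with different TRANSTBL_CONVERT
 * definitions, producing one switch that maps TARGET_F_RDLCK and friends
 * to the host values and a second one that maps them back.
 */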
6903 #define FLOCK_TRANSTBL \
6904     switch (type) { \
6905     TRANSTBL_CONVERT(F_RDLCK); \
6906     TRANSTBL_CONVERT(F_WRLCK); \
6907     TRANSTBL_CONVERT(F_UNLCK); \
6908     }
6909 
6910 static int target_to_host_flock(int type)
6911 {
6912 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6913     FLOCK_TRANSTBL
6914 #undef  TRANSTBL_CONVERT
6915     return -TARGET_EINVAL;
6916 }
6917 
6918 static int host_to_target_flock(int type)
6919 {
6920 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6921     FLOCK_TRANSTBL
6922 #undef  TRANSTBL_CONVERT
6923     /* If we don't know how to convert the value coming from
6924      * the host, copy it to the target field as-is.
6925      */
6926     return type;
6927 }
6928 
6929 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6930                                             abi_ulong target_flock_addr)
6931 {
6932     struct target_flock *target_fl;
6933     int l_type;
6934 
6935     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6936         return -TARGET_EFAULT;
6937     }
6938 
6939     __get_user(l_type, &target_fl->l_type);
6940     l_type = target_to_host_flock(l_type);
6941     if (l_type < 0) {
6942         return l_type;
6943     }
6944     fl->l_type = l_type;
6945     __get_user(fl->l_whence, &target_fl->l_whence);
6946     __get_user(fl->l_start, &target_fl->l_start);
6947     __get_user(fl->l_len, &target_fl->l_len);
6948     __get_user(fl->l_pid, &target_fl->l_pid);
6949     unlock_user_struct(target_fl, target_flock_addr, 0);
6950     return 0;
6951 }
6952 
6953 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6954                                           const struct flock64 *fl)
6955 {
6956     struct target_flock *target_fl;
6957     short l_type;
6958 
6959     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6960         return -TARGET_EFAULT;
6961     }
6962 
6963     l_type = host_to_target_flock(fl->l_type);
6964     __put_user(l_type, &target_fl->l_type);
6965     __put_user(fl->l_whence, &target_fl->l_whence);
6966     __put_user(fl->l_start, &target_fl->l_start);
6967     __put_user(fl->l_len, &target_fl->l_len);
6968     __put_user(fl->l_pid, &target_fl->l_pid);
6969     unlock_user_struct(target_fl, target_flock_addr, 1);
6970     return 0;
6971 }
6972 
6973 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6974 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6975 
6976 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
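/*
 * The old ARM OABI lays out struct flock64 without the alignment padding
 * that EABI inserts before the 64-bit l_start member; the packed definition
 * below reproduces that layout, with dedicated copy helpers for it.
 */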
6977 struct target_oabi_flock64 {
6978     abi_short l_type;
6979     abi_short l_whence;
6980     abi_llong l_start;
6981     abi_llong l_len;
6982     abi_int   l_pid;
6983 } QEMU_PACKED;
6984 
6985 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6986                                                    abi_ulong target_flock_addr)
6987 {
6988     struct target_oabi_flock64 *target_fl;
6989     int l_type;
6990 
6991     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6992         return -TARGET_EFAULT;
6993     }
6994 
6995     __get_user(l_type, &target_fl->l_type);
6996     l_type = target_to_host_flock(l_type);
6997     if (l_type < 0) {
6998         return l_type;
6999     }
7000     fl->l_type = l_type;
7001     __get_user(fl->l_whence, &target_fl->l_whence);
7002     __get_user(fl->l_start, &target_fl->l_start);
7003     __get_user(fl->l_len, &target_fl->l_len);
7004     __get_user(fl->l_pid, &target_fl->l_pid);
7005     unlock_user_struct(target_fl, target_flock_addr, 0);
7006     return 0;
7007 }
7008 
7009 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7010                                                  const struct flock64 *fl)
7011 {
7012     struct target_oabi_flock64 *target_fl;
7013     short l_type;
7014 
7015     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7016         return -TARGET_EFAULT;
7017     }
7018 
7019     l_type = host_to_target_flock(fl->l_type);
7020     __put_user(l_type, &target_fl->l_type);
7021     __put_user(fl->l_whence, &target_fl->l_whence);
7022     __put_user(fl->l_start, &target_fl->l_start);
7023     __put_user(fl->l_len, &target_fl->l_len);
7024     __put_user(fl->l_pid, &target_fl->l_pid);
7025     unlock_user_struct(target_fl, target_flock_addr, 1);
7026     return 0;
7027 }
7028 #endif
7029 
7030 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7031                                               abi_ulong target_flock_addr)
7032 {
7033     struct target_flock64 *target_fl;
7034     int l_type;
7035 
7036     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7037         return -TARGET_EFAULT;
7038     }
7039 
7040     __get_user(l_type, &target_fl->l_type);
7041     l_type = target_to_host_flock(l_type);
7042     if (l_type < 0) {
7043         return l_type;
7044     }
7045     fl->l_type = l_type;
7046     __get_user(fl->l_whence, &target_fl->l_whence);
7047     __get_user(fl->l_start, &target_fl->l_start);
7048     __get_user(fl->l_len, &target_fl->l_len);
7049     __get_user(fl->l_pid, &target_fl->l_pid);
7050     unlock_user_struct(target_fl, target_flock_addr, 0);
7051     return 0;
7052 }
7053 
7054 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7055                                             const struct flock64 *fl)
7056 {
7057     struct target_flock64 *target_fl;
7058     short l_type;
7059 
7060     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7061         return -TARGET_EFAULT;
7062     }
7063 
7064     l_type = host_to_target_flock(fl->l_type);
7065     __put_user(l_type, &target_fl->l_type);
7066     __put_user(fl->l_whence, &target_fl->l_whence);
7067     __put_user(fl->l_start, &target_fl->l_start);
7068     __put_user(fl->l_len, &target_fl->l_len);
7069     __put_user(fl->l_pid, &target_fl->l_pid);
7070     unlock_user_struct(target_fl, target_flock_addr, 1);
7071     return 0;
7072 }
7073 
7074 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7075 {
7076     struct flock64 fl64;
7077 #ifdef F_GETOWN_EX
7078     struct f_owner_ex fox;
7079     struct target_f_owner_ex *target_fox;
7080 #endif
7081     abi_long ret;
7082     int host_cmd = target_to_host_fcntl_cmd(cmd);
7083 
7084     if (host_cmd == -TARGET_EINVAL)
7085         return host_cmd;
7086 
7087     switch(cmd) {
7088     case TARGET_F_GETLK:
7089         ret = copy_from_user_flock(&fl64, arg);
7090         if (ret) {
7091             return ret;
7092         }
7093         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7094         if (ret == 0) {
7095             ret = copy_to_user_flock(arg, &fl64);
7096         }
7097         break;
7098 
7099     case TARGET_F_SETLK:
7100     case TARGET_F_SETLKW:
7101         ret = copy_from_user_flock(&fl64, arg);
7102         if (ret) {
7103             return ret;
7104         }
7105         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7106         break;
7107 
7108     case TARGET_F_GETLK64:
7109     case TARGET_F_OFD_GETLK:
7110         ret = copy_from_user_flock64(&fl64, arg);
7111         if (ret) {
7112             return ret;
7113         }
7114         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7115         if (ret == 0) {
7116             ret = copy_to_user_flock64(arg, &fl64);
7117         }
7118         break;
7119     case TARGET_F_SETLK64:
7120     case TARGET_F_SETLKW64:
7121     case TARGET_F_OFD_SETLK:
7122     case TARGET_F_OFD_SETLKW:
7123         ret = copy_from_user_flock64(&fl64, arg);
7124         if (ret) {
7125             return ret;
7126         }
7127         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7128         break;
7129 
7130     case TARGET_F_GETFL:
7131         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7132         if (ret >= 0) {
7133             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7134         }
7135         break;
7136 
7137     case TARGET_F_SETFL:
7138         ret = get_errno(safe_fcntl(fd, host_cmd,
7139                                    target_to_host_bitmask(arg,
7140                                                           fcntl_flags_tbl)));
7141         break;
7142 
7143 #ifdef F_GETOWN_EX
7144     case TARGET_F_GETOWN_EX:
7145         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7146         if (ret >= 0) {
7147             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7148                 return -TARGET_EFAULT;
7149             target_fox->type = tswap32(fox.type);
7150             target_fox->pid = tswap32(fox.pid);
7151             unlock_user_struct(target_fox, arg, 1);
7152         }
7153         break;
7154 #endif
7155 
7156 #ifdef F_SETOWN_EX
7157     case TARGET_F_SETOWN_EX:
7158         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7159             return -TARGET_EFAULT;
7160         fox.type = tswap32(target_fox->type);
7161         fox.pid = tswap32(target_fox->pid);
7162         unlock_user_struct(target_fox, arg, 0);
7163         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7164         break;
7165 #endif
7166 
7167     case TARGET_F_SETSIG:
7168         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7169         break;
7170 
7171     case TARGET_F_GETSIG:
7172         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7173         break;
7174 
7175     case TARGET_F_SETOWN:
7176     case TARGET_F_GETOWN:
7177     case TARGET_F_SETLEASE:
7178     case TARGET_F_GETLEASE:
7179     case TARGET_F_SETPIPE_SZ:
7180     case TARGET_F_GETPIPE_SZ:
7181     case TARGET_F_ADD_SEALS:
7182     case TARGET_F_GET_SEALS:
7183         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7184         break;
7185 
7186     default:
7187         ret = get_errno(safe_fcntl(fd, cmd, arg));
7188         break;
7189     }
7190     return ret;
7191 }
7192 
7193 #ifdef USE_UID16
7194 
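/*
 * The legacy 16-bit UID/GID syscalls cannot represent IDs above 65535; as
 * the kernel does, report such IDs as the overflow value 65534.  -1 must
 * survive the conversion unchanged because it means "leave this ID alone".
 */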
7195 static inline int high2lowuid(int uid)
7196 {
7197     if (uid > 65535)
7198         return 65534;
7199     else
7200         return uid;
7201 }
7202 
7203 static inline int high2lowgid(int gid)
7204 {
7205     if (gid > 65535)
7206         return 65534;
7207     else
7208         return gid;
7209 }
7210 
7211 static inline int low2highuid(int uid)
7212 {
7213     if ((int16_t)uid == -1)
7214         return -1;
7215     else
7216         return uid;
7217 }
7218 
7219 static inline int low2highgid(int gid)
7220 {
7221     if ((int16_t)gid == -1)
7222         return -1;
7223     else
7224         return gid;
7225 }
7226 static inline int tswapid(int id)
7227 {
7228     return tswap16(id);
7229 }
7230 
7231 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7232 
7233 #else /* !USE_UID16 */
7234 static inline int high2lowuid(int uid)
7235 {
7236     return uid;
7237 }
7238 static inline int high2lowgid(int gid)
7239 {
7240     return gid;
7241 }
7242 static inline int low2highuid(int uid)
7243 {
7244     return uid;
7245 }
7246 static inline int low2highgid(int gid)
7247 {
7248     return gid;
7249 }
7250 static inline int tswapid(int id)
7251 {
7252     return tswap32(id);
7253 }
7254 
7255 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7256 
7257 #endif /* USE_UID16 */
7258 
7259 /* We must do direct syscalls for setting UID/GID, because we want to
7260  * implement the Linux system call semantics of "change only for this thread",
7261  * not the libc/POSIX semantics of "change for all threads in process".
7262  * (See http://ewontfix.com/17/ for more details.)
7263  * We use the 32-bit version of the syscalls if present; if it is not
7264  * then either the host architecture supports 32-bit UIDs natively with
7265  * the standard syscall, or the 16-bit UID is the best we can do.
7266  */
7267 #ifdef __NR_setuid32
7268 #define __NR_sys_setuid __NR_setuid32
7269 #else
7270 #define __NR_sys_setuid __NR_setuid
7271 #endif
7272 #ifdef __NR_setgid32
7273 #define __NR_sys_setgid __NR_setgid32
7274 #else
7275 #define __NR_sys_setgid __NR_setgid
7276 #endif
7277 #ifdef __NR_setresuid32
7278 #define __NR_sys_setresuid __NR_setresuid32
7279 #else
7280 #define __NR_sys_setresuid __NR_setresuid
7281 #endif
7282 #ifdef __NR_setresgid32
7283 #define __NR_sys_setresgid __NR_setresgid32
7284 #else
7285 #define __NR_sys_setresgid __NR_setresgid
7286 #endif
7287 
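/*
 * Each _syscallN() line below expands into a small wrapper that issues the
 * raw syscall number chosen above, so the libc wrappers (and their
 * process-wide setxid broadcast) are bypassed.
 */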
7288 _syscall1(int, sys_setuid, uid_t, uid)
7289 _syscall1(int, sys_setgid, gid_t, gid)
7290 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7291 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7292 
7293 void syscall_init(void)
7294 {
7295     IOCTLEntry *ie;
7296     const argtype *arg_type;
7297     int size;
7298 
7299     thunk_init(STRUCT_MAX);
7300 
7301 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7302 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7303 #include "syscall_types.h"
7304 #undef STRUCT
7305 #undef STRUCT_SPECIAL
7306 
7307     /* We patch the ioctl size if necessary.  We rely on the fact that
7308        no ioctl has all bits set to '1' in the size field. */
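    /*
     * An entry whose size field is all ones is a placeholder: the real size
     * is computed from the thunk description of the pointed-to structure
     * and patched into target_cmd below.
     */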
7309     ie = ioctl_entries;
7310     while (ie->target_cmd != 0) {
7311         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7312             TARGET_IOC_SIZEMASK) {
7313             arg_type = ie->arg_type;
7314             if (arg_type[0] != TYPE_PTR) {
7315                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7316                         ie->target_cmd);
7317                 exit(1);
7318             }
7319             arg_type++;
7320             size = thunk_type_size(arg_type, 0);
7321             ie->target_cmd = (ie->target_cmd &
7322                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7323                 (size << TARGET_IOC_SIZESHIFT);
7324         }
7325 
7326         /* automatic consistency check if same arch */
7327 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7328     (defined(__x86_64__) && defined(TARGET_X86_64))
7329         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7330             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7331                     ie->name, ie->target_cmd, ie->host_cmd);
7332         }
7333 #endif
7334         ie++;
7335     }
7336 }
7337 
7338 #ifdef TARGET_NR_truncate64
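/*
 * Some 32-bit ABIs pass 64-bit syscall arguments in aligned register pairs,
 * inserting an unused register before the offset; regpairs_aligned() tells
 * us when the offset halves arrive one argument slot later than usual.
 */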
7339 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7340                                          abi_long arg2,
7341                                          abi_long arg3,
7342                                          abi_long arg4)
7343 {
7344     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7345         arg2 = arg3;
7346         arg3 = arg4;
7347     }
7348     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7349 }
7350 #endif
7351 
7352 #ifdef TARGET_NR_ftruncate64
7353 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7354                                           abi_long arg2,
7355                                           abi_long arg3,
7356                                           abi_long arg4)
7357 {
7358     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7359         arg2 = arg3;
7360         arg3 = arg4;
7361     }
7362     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7363 }
7364 #endif
7365 
7366 #if defined(TARGET_NR_timer_settime) || \
7367     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7368 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7369                                                  abi_ulong target_addr)
7370 {
7371     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7372                                 offsetof(struct target_itimerspec,
7373                                          it_interval)) ||
7374         target_to_host_timespec(&host_its->it_value, target_addr +
7375                                 offsetof(struct target_itimerspec,
7376                                          it_value))) {
7377         return -TARGET_EFAULT;
7378     }
7379 
7380     return 0;
7381 }
7382 #endif
7383 
7384 #if defined(TARGET_NR_timer_settime64) || \
7385     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7386 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7387                                                    abi_ulong target_addr)
7388 {
7389     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7390                                   offsetof(struct target__kernel_itimerspec,
7391                                            it_interval)) ||
7392         target_to_host_timespec64(&host_its->it_value, target_addr +
7393                                   offsetof(struct target__kernel_itimerspec,
7394                                            it_value))) {
7395         return -TARGET_EFAULT;
7396     }
7397 
7398     return 0;
7399 }
7400 #endif
7401 
7402 #if ((defined(TARGET_NR_timerfd_gettime) || \
7403       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7404       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7405 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7406                                                  struct itimerspec *host_its)
7407 {
7408     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7409                                                        it_interval),
7410                                 &host_its->it_interval) ||
7411         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7412                                                        it_value),
7413                                 &host_its->it_value)) {
7414         return -TARGET_EFAULT;
7415     }
7416     return 0;
7417 }
7418 #endif
7419 
7420 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7421       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7422       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7423 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7424                                                    struct itimerspec *host_its)
7425 {
7426     if (host_to_target_timespec64(target_addr +
7427                                   offsetof(struct target__kernel_itimerspec,
7428                                            it_interval),
7429                                   &host_its->it_interval) ||
7430         host_to_target_timespec64(target_addr +
7431                                   offsetof(struct target__kernel_itimerspec,
7432                                            it_value),
7433                                   &host_its->it_value)) {
7434         return -TARGET_EFAULT;
7435     }
7436     return 0;
7437 }
7438 #endif
7439 
7440 #if defined(TARGET_NR_adjtimex) || \
7441     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7442 static inline abi_long target_to_host_timex(struct timex *host_tx,
7443                                             abi_long target_addr)
7444 {
7445     struct target_timex *target_tx;
7446 
7447     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7448         return -TARGET_EFAULT;
7449     }
7450 
7451     __get_user(host_tx->modes, &target_tx->modes);
7452     __get_user(host_tx->offset, &target_tx->offset);
7453     __get_user(host_tx->freq, &target_tx->freq);
7454     __get_user(host_tx->maxerror, &target_tx->maxerror);
7455     __get_user(host_tx->esterror, &target_tx->esterror);
7456     __get_user(host_tx->status, &target_tx->status);
7457     __get_user(host_tx->constant, &target_tx->constant);
7458     __get_user(host_tx->precision, &target_tx->precision);
7459     __get_user(host_tx->tolerance, &target_tx->tolerance);
7460     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7461     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7462     __get_user(host_tx->tick, &target_tx->tick);
7463     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7464     __get_user(host_tx->jitter, &target_tx->jitter);
7465     __get_user(host_tx->shift, &target_tx->shift);
7466     __get_user(host_tx->stabil, &target_tx->stabil);
7467     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7468     __get_user(host_tx->calcnt, &target_tx->calcnt);
7469     __get_user(host_tx->errcnt, &target_tx->errcnt);
7470     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7471     __get_user(host_tx->tai, &target_tx->tai);
7472 
7473     unlock_user_struct(target_tx, target_addr, 0);
7474     return 0;
7475 }
7476 
7477 static inline abi_long host_to_target_timex(abi_long target_addr,
7478                                             struct timex *host_tx)
7479 {
7480     struct target_timex *target_tx;
7481 
7482     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7483         return -TARGET_EFAULT;
7484     }
7485 
7486     __put_user(host_tx->modes, &target_tx->modes);
7487     __put_user(host_tx->offset, &target_tx->offset);
7488     __put_user(host_tx->freq, &target_tx->freq);
7489     __put_user(host_tx->maxerror, &target_tx->maxerror);
7490     __put_user(host_tx->esterror, &target_tx->esterror);
7491     __put_user(host_tx->status, &target_tx->status);
7492     __put_user(host_tx->constant, &target_tx->constant);
7493     __put_user(host_tx->precision, &target_tx->precision);
7494     __put_user(host_tx->tolerance, &target_tx->tolerance);
7495     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7496     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7497     __put_user(host_tx->tick, &target_tx->tick);
7498     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7499     __put_user(host_tx->jitter, &target_tx->jitter);
7500     __put_user(host_tx->shift, &target_tx->shift);
7501     __put_user(host_tx->stabil, &target_tx->stabil);
7502     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7503     __put_user(host_tx->calcnt, &target_tx->calcnt);
7504     __put_user(host_tx->errcnt, &target_tx->errcnt);
7505     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7506     __put_user(host_tx->tai, &target_tx->tai);
7507 
7508     unlock_user_struct(target_tx, target_addr, 1);
7509     return 0;
7510 }
7511 #endif
7512 
7513 
7514 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7515 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7516                                               abi_long target_addr)
7517 {
7518     struct target__kernel_timex *target_tx;
7519 
7520     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7521                                  offsetof(struct target__kernel_timex,
7522                                           time))) {
7523         return -TARGET_EFAULT;
7524     }
7525 
7526     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7527         return -TARGET_EFAULT;
7528     }
7529 
7530     __get_user(host_tx->modes, &target_tx->modes);
7531     __get_user(host_tx->offset, &target_tx->offset);
7532     __get_user(host_tx->freq, &target_tx->freq);
7533     __get_user(host_tx->maxerror, &target_tx->maxerror);
7534     __get_user(host_tx->esterror, &target_tx->esterror);
7535     __get_user(host_tx->status, &target_tx->status);
7536     __get_user(host_tx->constant, &target_tx->constant);
7537     __get_user(host_tx->precision, &target_tx->precision);
7538     __get_user(host_tx->tolerance, &target_tx->tolerance);
7539     __get_user(host_tx->tick, &target_tx->tick);
7540     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7541     __get_user(host_tx->jitter, &target_tx->jitter);
7542     __get_user(host_tx->shift, &target_tx->shift);
7543     __get_user(host_tx->stabil, &target_tx->stabil);
7544     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7545     __get_user(host_tx->calcnt, &target_tx->calcnt);
7546     __get_user(host_tx->errcnt, &target_tx->errcnt);
7547     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7548     __get_user(host_tx->tai, &target_tx->tai);
7549 
7550     unlock_user_struct(target_tx, target_addr, 0);
7551     return 0;
7552 }
7553 
7554 static inline abi_long host_to_target_timex64(abi_long target_addr,
7555                                               struct timex *host_tx)
7556 {
7557     struct target__kernel_timex *target_tx;
7558 
7559     if (copy_to_user_timeval64(target_addr +
7560                                offsetof(struct target__kernel_timex, time),
7561                                &host_tx->time)) {
7562         return -TARGET_EFAULT;
7563     }
7564 
7565     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7566         return -TARGET_EFAULT;
7567     }
7568 
7569     __put_user(host_tx->modes, &target_tx->modes);
7570     __put_user(host_tx->offset, &target_tx->offset);
7571     __put_user(host_tx->freq, &target_tx->freq);
7572     __put_user(host_tx->maxerror, &target_tx->maxerror);
7573     __put_user(host_tx->esterror, &target_tx->esterror);
7574     __put_user(host_tx->status, &target_tx->status);
7575     __put_user(host_tx->constant, &target_tx->constant);
7576     __put_user(host_tx->precision, &target_tx->precision);
7577     __put_user(host_tx->tolerance, &target_tx->tolerance);
7578     __put_user(host_tx->tick, &target_tx->tick);
7579     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7580     __put_user(host_tx->jitter, &target_tx->jitter);
7581     __put_user(host_tx->shift, &target_tx->shift);
7582     __put_user(host_tx->stabil, &target_tx->stabil);
7583     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7584     __put_user(host_tx->calcnt, &target_tx->calcnt);
7585     __put_user(host_tx->errcnt, &target_tx->errcnt);
7586     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7587     __put_user(host_tx->tai, &target_tx->tai);
7588 
7589     unlock_user_struct(target_tx, target_addr, 1);
7590     return 0;
7591 }
7592 #endif
7593 
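/*
 * If the libc headers do not expose sigev_notify_thread_id by name, fall
 * back to glibc's internal union field.
 */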
7594 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7595 #define sigev_notify_thread_id _sigev_un._tid
7596 #endif
7597 
7598 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7599                                                abi_ulong target_addr)
7600 {
7601     struct target_sigevent *target_sevp;
7602 
7603     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7604         return -TARGET_EFAULT;
7605     }
7606 
7607     /* This union is awkward on 64 bit systems because it has a 32 bit
7608      * integer and a pointer in it; we follow the conversion approach
7609      * used for handling sigval types in signal.c so the guest should get
7610      * the correct value back even if we did a 64 bit byteswap and it's
7611      * using the 32 bit integer.
7612      */
7613     host_sevp->sigev_value.sival_ptr =
7614         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7615     host_sevp->sigev_signo =
7616         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7617     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7618     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7619 
7620     unlock_user_struct(target_sevp, target_addr, 1);
7621     return 0;
7622 }
7623 
7624 #if defined(TARGET_NR_mlockall)
7625 static inline int target_to_host_mlockall_arg(int arg)
7626 {
7627     int result = 0;
7628 
7629     if (arg & TARGET_MCL_CURRENT) {
7630         result |= MCL_CURRENT;
7631     }
7632     if (arg & TARGET_MCL_FUTURE) {
7633         result |= MCL_FUTURE;
7634     }
7635 #ifdef MCL_ONFAULT
7636     if (arg & TARGET_MCL_ONFAULT) {
7637         result |= MCL_ONFAULT;
7638     }
7639 #endif
7640 
7641     return result;
7642 }
7643 #endif
7644 
7645 static inline int target_to_host_msync_arg(abi_long arg)
7646 {
7647     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7648            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7649            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7650            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7651 }
7652 
7653 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7654      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7655      defined(TARGET_NR_newfstatat))
7656 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7657                                              abi_ulong target_addr,
7658                                              struct stat *host_st)
7659 {
7660 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7661     if (cpu_env->eabi) {
7662         struct target_eabi_stat64 *target_st;
7663 
7664         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7665             return -TARGET_EFAULT;
7666         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7667         __put_user(host_st->st_dev, &target_st->st_dev);
7668         __put_user(host_st->st_ino, &target_st->st_ino);
7669 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7670         __put_user(host_st->st_ino, &target_st->__st_ino);
7671 #endif
7672         __put_user(host_st->st_mode, &target_st->st_mode);
7673         __put_user(host_st->st_nlink, &target_st->st_nlink);
7674         __put_user(host_st->st_uid, &target_st->st_uid);
7675         __put_user(host_st->st_gid, &target_st->st_gid);
7676         __put_user(host_st->st_rdev, &target_st->st_rdev);
7677         __put_user(host_st->st_size, &target_st->st_size);
7678         __put_user(host_st->st_blksize, &target_st->st_blksize);
7679         __put_user(host_st->st_blocks, &target_st->st_blocks);
7680         __put_user(host_st->st_atime, &target_st->target_st_atime);
7681         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7682         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7683 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7684         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7685         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7686         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7687 #endif
7688         unlock_user_struct(target_st, target_addr, 1);
7689     } else
7690 #endif
7691     {
7692 #if defined(TARGET_HAS_STRUCT_STAT64)
7693         struct target_stat64 *target_st;
7694 #else
7695         struct target_stat *target_st;
7696 #endif
7697 
7698         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7699             return -TARGET_EFAULT;
7700         memset(target_st, 0, sizeof(*target_st));
7701         __put_user(host_st->st_dev, &target_st->st_dev);
7702         __put_user(host_st->st_ino, &target_st->st_ino);
7703 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7704         __put_user(host_st->st_ino, &target_st->__st_ino);
7705 #endif
7706         __put_user(host_st->st_mode, &target_st->st_mode);
7707         __put_user(host_st->st_nlink, &target_st->st_nlink);
7708         __put_user(host_st->st_uid, &target_st->st_uid);
7709         __put_user(host_st->st_gid, &target_st->st_gid);
7710         __put_user(host_st->st_rdev, &target_st->st_rdev);
7711         /* XXX: better use of kernel struct */
7712         __put_user(host_st->st_size, &target_st->st_size);
7713         __put_user(host_st->st_blksize, &target_st->st_blksize);
7714         __put_user(host_st->st_blocks, &target_st->st_blocks);
7715         __put_user(host_st->st_atime, &target_st->target_st_atime);
7716         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7717         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7718 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7719         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7720         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7721         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7722 #endif
7723         unlock_user_struct(target_st, target_addr, 1);
7724     }
7725 
7726     return 0;
7727 }
7728 #endif
7729 
7730 #if defined(TARGET_NR_statx) && defined(__NR_statx)
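/*
 * Copy a statx result, already gathered into a host-side struct
 * target_statx, out to guest memory with each field converted to
 * guest byte order.
 */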
7731 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7732                                             abi_ulong target_addr)
7733 {
7734     struct target_statx *target_stx;
7735 
7736     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
7737         return -TARGET_EFAULT;
7738     }
7739     memset(target_stx, 0, sizeof(*target_stx));
7740 
7741     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7742     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7743     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7744     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7745     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7746     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7747     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7748     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7749     __put_user(host_stx->stx_size, &target_stx->stx_size);
7750     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7751     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7752     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7753     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7754     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7755     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7756     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7757     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7758     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7759     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7760     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7761     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7762     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7763     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7764 
7765     unlock_user_struct(target_stx, target_addr, 1);
7766 
7767     return 0;
7768 }
7769 #endif
7770 
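/*
 * Invoke the host futex syscall directly, choosing between __NR_futex
 * and __NR_futex_time64 according to the host word size and the width
 * of the host timespec.
 */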
7771 static int do_sys_futex(int *uaddr, int op, int val,
7772                          const struct timespec *timeout, int *uaddr2,
7773                          int val3)
7774 {
7775 #if HOST_LONG_BITS == 64
7776 #if defined(__NR_futex)
7777     /* always a 64-bit time_t, it doesn't define _time64 version  */
7778     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7779 
7780 #endif
7781 #else /* HOST_LONG_BITS == 64 */
7782 #if defined(__NR_futex_time64)
7783     if (sizeof(timeout->tv_sec) == 8) {
7784         /* _time64 function on 32bit arch */
7785         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7786     }
7787 #endif
7788 #if defined(__NR_futex)
7789     /* old function on 32bit arch */
7790     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7791 #endif
7792 #endif /* HOST_LONG_BITS == 64 */
7793     g_assert_not_reached();
7794 }
7795 
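/*
 * Like do_sys_futex(), but via the safe_futex*() wrappers so that guest
 * signals arriving during the wait are handled correctly; failures are
 * converted with get_errno(), and -TARGET_ENOSYS is returned if the
 * host has no usable futex syscall.
 */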
7796 static int do_safe_futex(int *uaddr, int op, int val,
7797                          const struct timespec *timeout, int *uaddr2,
7798                          int val3)
7799 {
7800 #if HOST_LONG_BITS == 64
7801 #if defined(__NR_futex)
7802     /* always a 64-bit time_t, it doesn't define _time64 version  */
7803     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7804 #endif
7805 #else /* HOST_LONG_BITS == 64 */
7806 #if defined(__NR_futex_time64)
7807     if (sizeof(timeout->tv_sec) == 8) {
7808         /* _time64 function on 32bit arch */
7809         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7810                                            val3));
7811     }
7812 #endif
7813 #if defined(__NR_futex)
7814     /* old function on 32bit arch */
7815     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7816 #endif
7817 #endif /* HOST_LONG_BITS == 64 */
7818     return -TARGET_ENOSYS;
7819 }
7820 
7821 /* ??? Using host futex calls even when target atomic operations
7822    are not really atomic probably breaks things.  However, implementing
7823    futexes locally would make futexes shared between multiple processes
7824    tricky; such shared futexes are probably useless anyway, because
7825    guest atomic operations won't work across processes either.  */
7826 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7827 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7828                     int op, int val, target_ulong timeout,
7829                     target_ulong uaddr2, int val3)
7830 {
7831     struct timespec ts, *pts = NULL;
7832     void *haddr2 = NULL;
7833     int base_op;
7834 
7835     /* We assume FUTEX_* constants are the same on both host and target. */
7836 #ifdef FUTEX_CMD_MASK
7837     base_op = op & FUTEX_CMD_MASK;
7838 #else
7839     base_op = op;
7840 #endif
7841     switch (base_op) {
7842     case FUTEX_WAIT:
7843     case FUTEX_WAIT_BITSET:
7844         val = tswap32(val);
7845         break;
7846     case FUTEX_WAIT_REQUEUE_PI:
7847         val = tswap32(val);
7848         haddr2 = g2h(cpu, uaddr2);
7849         break;
7850     case FUTEX_LOCK_PI:
7851     case FUTEX_LOCK_PI2:
7852         break;
7853     case FUTEX_WAKE:
7854     case FUTEX_WAKE_BITSET:
7855     case FUTEX_TRYLOCK_PI:
7856     case FUTEX_UNLOCK_PI:
7857         timeout = 0;
7858         break;
7859     case FUTEX_FD:
7860         val = target_to_host_signal(val);
7861         timeout = 0;
7862         break;
7863     case FUTEX_CMP_REQUEUE:
7864     case FUTEX_CMP_REQUEUE_PI:
7865         val3 = tswap32(val3);
7866         /* fall through */
7867     case FUTEX_REQUEUE:
7868     case FUTEX_WAKE_OP:
7869         /*
7870          * For these, the 4th argument is not TIMEOUT, but VAL2.
7871          * But the prototype of do_safe_futex takes a pointer, so
7872          * insert casts to satisfy the compiler.  We do not need
7873          * to tswap VAL2 since it's not compared to guest memory.
7874          */
7875         pts = (struct timespec *)(uintptr_t)timeout;
7876         timeout = 0;
7877         haddr2 = g2h(cpu, uaddr2);
7878         break;
7879     default:
7880         return -TARGET_ENOSYS;
7881     }
7882     if (timeout) {
7883         pts = &ts;
7884         if (time64
7885             ? target_to_host_timespec64(pts, timeout)
7886             : target_to_host_timespec(pts, timeout)) {
7887             return -TARGET_EFAULT;
7888         }
7889     }
7890     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7891 }
7892 #endif
7893 
7894 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
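/*
 * Implement name_to_handle_at(): mirror the guest's struct file_handle
 * into a host copy, issue the host syscall, then copy the (otherwise
 * opaque) handle and the mount ID back to guest memory with the sized
 * header fields byteswapped.
 */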
7895 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7896                                      abi_long handle, abi_long mount_id,
7897                                      abi_long flags)
7898 {
7899     struct file_handle *target_fh;
7900     struct file_handle *fh;
7901     int mid = 0;
7902     abi_long ret;
7903     char *name;
7904     unsigned int size, total_size;
7905 
7906     if (get_user_s32(size, handle)) {
7907         return -TARGET_EFAULT;
7908     }
7909 
7910     name = lock_user_string(pathname);
7911     if (!name) {
7912         return -TARGET_EFAULT;
7913     }
7914 
7915     total_size = sizeof(struct file_handle) + size;
7916     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7917     if (!target_fh) {
7918         unlock_user(name, pathname, 0);
7919         return -TARGET_EFAULT;
7920     }
7921 
7922     fh = g_malloc0(total_size);
7923     fh->handle_bytes = size;
7924 
7925     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7926     unlock_user(name, pathname, 0);
7927 
7928     /* man name_to_handle_at(2):
7929      * Other than the use of the handle_bytes field, the caller should treat
7930      * the file_handle structure as an opaque data type
7931      */
7932 
7933     memcpy(target_fh, fh, total_size);
7934     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7935     target_fh->handle_type = tswap32(fh->handle_type);
7936     g_free(fh);
7937     unlock_user(target_fh, handle, total_size);
7938 
7939     if (put_user_s32(mid, mount_id)) {
7940         return -TARGET_EFAULT;
7941     }
7942 
7943     return ret;
7944 
7945 }
7946 #endif
7947 
7948 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
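/*
 * Implement open_by_handle_at(): duplicate the guest's struct
 * file_handle into host memory, fix up the byte order of its header
 * fields, and open it with the flags translated via fcntl_flags_tbl.
 */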
7949 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7950                                      abi_long flags)
7951 {
7952     struct file_handle *target_fh;
7953     struct file_handle *fh;
7954     unsigned int size, total_size;
7955     abi_long ret;
7956 
7957     if (get_user_s32(size, handle)) {
7958         return -TARGET_EFAULT;
7959     }
7960 
7961     total_size = sizeof(struct file_handle) + size;
7962     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7963     if (!target_fh) {
7964         return -TARGET_EFAULT;
7965     }
7966 
7967     fh = g_memdup(target_fh, total_size);
7968     fh->handle_bytes = size;
7969     fh->handle_type = tswap32(target_fh->handle_type);
7970 
7971     ret = get_errno(open_by_handle_at(mount_fd, fh,
7972                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7973 
7974     g_free(fh);
7975 
7976     unlock_user(target_fh, handle, total_size);
7977 
7978     return ret;
7979 }
7980 #endif
7981 
7982 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7983 
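/*
 * Shared helper for signalfd() and signalfd4(): reject unsupported
 * flags, convert the guest signal mask and flags to host values,
 * create the host signalfd and register an fd translator for data
 * read back from it.
 */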
7984 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7985 {
7986     int host_flags;
7987     target_sigset_t *target_mask;
7988     sigset_t host_mask;
7989     abi_long ret;
7990 
7991     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7992         return -TARGET_EINVAL;
7993     }
7994     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7995         return -TARGET_EFAULT;
7996     }
7997 
7998     target_to_host_sigset(&host_mask, target_mask);
7999 
8000     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8001 
8002     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8003     if (ret >= 0) {
8004         fd_trans_register(ret, &target_signalfd_trans);
8005     }
8006 
8007     unlock_user_struct(target_mask, mask, 0);
8008 
8009     return ret;
8010 }
8011 #endif
8012 
8013 /* Map host to target signal numbers for the wait family of syscalls.
8014    Assume all other status bits are the same.  */
8015 int host_to_target_waitstatus(int status)
8016 {
8017     if (WIFSIGNALED(status)) {
8018         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
8019     }
8020     if (WIFSTOPPED(status)) {
8021         return (host_to_target_signal(WSTOPSIG(status)) << 8)
8022                | (status & 0xff);
8023     }
8024     return status;
8025 }
8026 
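/* Emulate /proc/self/cmdline: write out the guest's argv[] strings,
 * each with its terminating NUL byte. */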
8027 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8028 {
8029     CPUState *cpu = env_cpu(cpu_env);
8030     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8031     int i;
8032 
8033     for (i = 0; i < bprm->argc; i++) {
8034         size_t len = strlen(bprm->argv[i]) + 1;
8035 
8036         if (write(fd, bprm->argv[i], len) != len) {
8037             return -1;
8038         }
8039     }
8040 
8041     return 0;
8042 }
8043 
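/*
 * Emulate /proc/self/maps: walk the host's own mappings and print the
 * ones that cover guest address space, translated to guest addresses
 * and guest page protections (plus a [vsyscall] line where the target
 * provides that page).
 */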
8044 static int open_self_maps(CPUArchState *cpu_env, int fd)
8045 {
8046     CPUState *cpu = env_cpu(cpu_env);
8047     TaskState *ts = cpu->opaque;
8048     GSList *map_info = read_self_maps();
8049     GSList *s;
8050     int count;
8051 
8052     for (s = map_info; s; s = g_slist_next(s)) {
8053         MapInfo *e = (MapInfo *) s->data;
8054 
8055         if (h2g_valid(e->start)) {
8056             unsigned long min = e->start;
8057             unsigned long max = e->end;
8058             int flags = page_get_flags(h2g(min));
8059             const char *path;
8060 
8061             max = h2g_valid(max - 1) ?
8062                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8063 
8064             if (page_check_range(h2g(min), max - min, flags) == -1) {
8065                 continue;
8066             }
8067 
8068 #ifdef TARGET_HPPA
8069             if (h2g(max) == ts->info->stack_limit) {
8070 #else
8071             if (h2g(min) == ts->info->stack_limit) {
8072 #endif
8073                 path = "[stack]";
8074             } else {
8075                 path = e->path;
8076             }
8077 
8078             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8079                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8080                             h2g(min), h2g(max - 1) + 1,
8081                             (flags & PAGE_READ) ? 'r' : '-',
8082                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8083                             (flags & PAGE_EXEC) ? 'x' : '-',
8084                             e->is_priv ? 'p' : 's',
8085                             (uint64_t) e->offset, e->dev, e->inode);
8086             if (path) {
8087                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8088             } else {
8089                 dprintf(fd, "\n");
8090             }
8091         }
8092     }
8093 
8094     free_self_maps(map_info);
8095 
8096 #ifdef TARGET_VSYSCALL_PAGE
8097     /*
8098      * We only support execution from the vsyscall page.
8099      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8100      */
8101     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8102                     " --xp 00000000 00:00 0",
8103                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8104     dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
8105 #endif
8106 
8107     return 0;
8108 }
8109 
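/*
 * Emulate /proc/self/stat: only the fields QEMU can meaningfully
 * supply (pid, comm, state, ppid, starttime and start of stack) are
 * filled in; every other field reads as 0.
 */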
8110 static int open_self_stat(CPUArchState *cpu_env, int fd)
8111 {
8112     CPUState *cpu = env_cpu(cpu_env);
8113     TaskState *ts = cpu->opaque;
8114     g_autoptr(GString) buf = g_string_new(NULL);
8115     int i;
8116 
8117     for (i = 0; i < 44; i++) {
8118         if (i == 0) {
8119             /* pid */
8120             g_string_printf(buf, FMT_pid " ", getpid());
8121         } else if (i == 1) {
8122             /* app name */
8123             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8124             bin = bin ? bin + 1 : ts->bprm->argv[0];
8125             g_string_printf(buf, "(%.15s) ", bin);
8126         } else if (i == 2) {
8127             /* task state */
8128             g_string_assign(buf, "R "); /* we are running right now */
8129         } else if (i == 3) {
8130             /* ppid */
8131             g_string_printf(buf, FMT_pid " ", getppid());
8132         } else if (i == 21) {
8133             /* starttime */
8134             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8135         } else if (i == 27) {
8136             /* stack bottom */
8137             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8138         } else {
8139             /* for the rest, there is MasterCard: report 0 */
8140             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8141         }
8142 
8143         if (write(fd, buf->str, buf->len) != buf->len) {
8144             return -1;
8145         }
8146     }
8147 
8148     return 0;
8149 }
8150 
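/* Emulate /proc/self/auxv: copy the auxiliary vector saved on the
 * guest stack at exec time out to the file. */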
8151 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8152 {
8153     CPUState *cpu = env_cpu(cpu_env);
8154     TaskState *ts = cpu->opaque;
8155     abi_ulong auxv = ts->info->saved_auxv;
8156     abi_ulong len = ts->info->auxv_len;
8157     char *ptr;
8158 
8159     /*
8160      * The auxiliary vector is stored on the target process stack;
8161      * read in the whole auxv vector and copy it to the file.
8162      */
8163     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8164     if (ptr != NULL) {
8165         while (len > 0) {
8166             ssize_t r;
8167             r = write(fd, ptr, len);
8168             if (r <= 0) {
8169                 break;
8170             }
8171             len -= r;
8172             ptr += r;
8173         }
8174         lseek(fd, 0, SEEK_SET);
8175         unlock_user(ptr, auxv, len);
8176     }
8177 
8178     return 0;
8179 }
8180 
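/* Return nonzero if filename names the given entry under /proc/self/
 * or /proc/<pid>/ for our own pid. */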
8181 static int is_proc_myself(const char *filename, const char *entry)
8182 {
8183     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8184         filename += strlen("/proc/");
8185         if (!strncmp(filename, "self/", strlen("self/"))) {
8186             filename += strlen("self/");
8187         } else if (*filename >= '1' && *filename <= '9') {
8188             char myself[80];
8189             snprintf(myself, sizeof(myself), "%d/", getpid());
8190             if (!strncmp(filename, myself, strlen(myself))) {
8191                 filename += strlen(myself);
8192             } else {
8193                 return 0;
8194             }
8195         } else {
8196             return 0;
8197         }
8198         if (!strcmp(filename, entry)) {
8199             return 1;
8200         }
8201     }
8202     return 0;
8203 }
8204 
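/* Dump the exception message, the failing executable's path, the CPU
 * state and the guest memory map to logfile (if non-NULL). */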
8205 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8206                       const char *fmt, int code)
8207 {
8208     if (logfile) {
8209         CPUState *cs = env_cpu(env);
8210 
8211         fprintf(logfile, fmt, code);
8212         fprintf(logfile, "Failing executable: %s\n", exec_path);
8213         cpu_dump_state(cs, logfile, 0);
8214         open_self_maps(env, fileno(logfile));
8215     }
8216 }
8217 
8218 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8219 {
8220     /* dump to console */
8221     excp_dump_file(stderr, env, fmt, code);
8222 
8223     /* dump to log file */
8224     if (qemu_log_separate()) {
8225         FILE *logfile = qemu_log_trylock();
8226 
8227         excp_dump_file(logfile, env, fmt, code);
8228         qemu_log_unlock(logfile);
8229     }
8230 }
8231 
8232 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8233     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8234 static int is_proc(const char *filename, const char *entry)
8235 {
8236     return strcmp(filename, entry) == 0;
8237 }
8238 #endif
8239 
8240 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
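/*
 * Emulate /proc/net/route for cross-endian guests: the destination,
 * gateway and mask of each host route are byteswapped into guest
 * byte order.
 */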
8241 static int open_net_route(CPUArchState *cpu_env, int fd)
8242 {
8243     FILE *fp;
8244     char *line = NULL;
8245     size_t len = 0;
8246     ssize_t read;
8247 
8248     fp = fopen("/proc/net/route", "r");
8249     if (fp == NULL) {
8250         return -1;
8251     }
8252 
8253     /* read header */
8254 
8255     read = getline(&line, &len, fp);
8256     dprintf(fd, "%s", line);
8257 
8258     /* read routes */
8259 
8260     while ((read = getline(&line, &len, fp)) != -1) {
8261         char iface[16];
8262         uint32_t dest, gw, mask;
8263         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8264         int fields;
8265 
8266         fields = sscanf(line,
8267                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8268                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8269                         &mask, &mtu, &window, &irtt);
8270         if (fields != 11) {
8271             continue;
8272         }
8273         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8274                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8275                 metric, tswap32(mask), mtu, window, irtt);
8276     }
8277 
8278     free(line);
8279     fclose(fp);
8280 
8281     return 0;
8282 }
8283 #endif
8284 
8285 #if defined(TARGET_SPARC)
8286 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8287 {
8288     dprintf(fd, "type\t\t: sun4u\n");
8289     return 0;
8290 }
8291 #endif
8292 
8293 #if defined(TARGET_HPPA)
8294 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8295 {
8296     int i, num_cpus;
8297 
8298     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8299     for (i = 0; i < num_cpus; i++) {
8300         dprintf(fd, "processor\t: %d\n", i);
8301         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8302         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8303         dprintf(fd, "capabilities\t: os32\n");
8304         dprintf(fd, "model\t\t: 9000/778/B160L - "
8305                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8306     }
8307     return 0;
8308 }
8309 #endif
8310 
8311 #if defined(TARGET_M68K)
8312 static int open_hardware(CPUArchState *cpu_env, int fd)
8313 {
8314     dprintf(fd, "Model:\t\tqemu-m68k\n");
8315     return 0;
8316 }
8317 #endif
8318 
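/*
 * openat() with QEMU's /proc interception: /proc/self/exe is redirected
 * to the real executable, and a small table of emulated /proc files is
 * synthesized into a memfd (or an unlinked temporary file when
 * memfd_create() is unavailable); everything else is passed to the host
 * via safe_openat().
 */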
8319 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8320 {
8321     struct fake_open {
8322         const char *filename;
8323         int (*fill)(CPUArchState *cpu_env, int fd);
8324         int (*cmp)(const char *s1, const char *s2);
8325     };
8326     const struct fake_open *fake_open;
8327     static const struct fake_open fakes[] = {
8328         { "maps", open_self_maps, is_proc_myself },
8329         { "stat", open_self_stat, is_proc_myself },
8330         { "auxv", open_self_auxv, is_proc_myself },
8331         { "cmdline", open_self_cmdline, is_proc_myself },
8332 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8333         { "/proc/net/route", open_net_route, is_proc },
8334 #endif
8335 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8336         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8337 #endif
8338 #if defined(TARGET_M68K)
8339         { "/proc/hardware", open_hardware, is_proc },
8340 #endif
8341         { NULL, NULL, NULL }
8342     };
8343 
8344     if (is_proc_myself(pathname, "exe")) {
8345         return safe_openat(dirfd, exec_path, flags, mode);
8346     }
8347 
8348     for (fake_open = fakes; fake_open->filename; fake_open++) {
8349         if (fake_open->cmp(pathname, fake_open->filename)) {
8350             break;
8351         }
8352     }
8353 
8354     if (fake_open->filename) {
8355         const char *tmpdir;
8356         char filename[PATH_MAX];
8357         int fd, r;
8358 
8359         fd = memfd_create("qemu-open", 0);
8360         if (fd < 0) {
8361             if (errno != ENOSYS) {
8362                 return fd;
8363             }
8364             /* create temporary file to map stat to */
8365             tmpdir = getenv("TMPDIR");
8366             if (!tmpdir)
8367                 tmpdir = "/tmp";
8368             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8369             fd = mkstemp(filename);
8370             if (fd < 0) {
8371                 return fd;
8372             }
8373             unlink(filename);
8374         }
8375 
8376         if ((r = fake_open->fill(cpu_env, fd))) {
8377             int e = errno;
8378             close(fd);
8379             errno = e;
8380             return r;
8381         }
8382         lseek(fd, 0, SEEK_SET);
8383 
8384         return fd;
8385     }
8386 
8387     return safe_openat(dirfd, path(pathname), flags, mode);
8388 }
8389 
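/*
 * Common implementation of execve() and execveat(): count and lock the
 * guest argv/envp string arrays, then hand them to the host through the
 * safe_execveat() wrapper (see the comment below on why even execve
 * needs safe_syscall); /proc/self/exe is rewritten to the real
 * executable path.
 */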
8390 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8391                        abi_long pathname, abi_long guest_argp,
8392                        abi_long guest_envp, int flags)
8393 {
8394     int ret;
8395     char **argp, **envp;
8396     int argc, envc;
8397     abi_ulong gp;
8398     abi_ulong addr;
8399     char **q;
8400     void *p;
8401 
8402     argc = 0;
8403 
8404     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8405         if (get_user_ual(addr, gp)) {
8406             return -TARGET_EFAULT;
8407         }
8408         if (!addr) {
8409             break;
8410         }
8411         argc++;
8412     }
8413     envc = 0;
8414     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8415         if (get_user_ual(addr, gp)) {
8416             return -TARGET_EFAULT;
8417         }
8418         if (!addr) {
8419             break;
8420         }
8421         envc++;
8422     }
8423 
8424     argp = g_new0(char *, argc + 1);
8425     envp = g_new0(char *, envc + 1);
8426 
8427     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8428         if (get_user_ual(addr, gp)) {
8429             goto execve_efault;
8430         }
8431         if (!addr) {
8432             break;
8433         }
8434         *q = lock_user_string(addr);
8435         if (!*q) {
8436             goto execve_efault;
8437         }
8438     }
8439     *q = NULL;
8440 
8441     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8442         if (get_user_ual(addr, gp)) {
8443             goto execve_efault;
8444         }
8445         if (!addr) {
8446             break;
8447         }
8448         *q = lock_user_string(addr);
8449         if (!*q) {
8450             goto execve_efault;
8451         }
8452     }
8453     *q = NULL;
8454 
8455     /*
8456      * Although execve() is not an interruptible syscall it is
8457      * a special case where we must use the safe_syscall wrapper:
8458      * if we allow a signal to happen before we make the host
8459      * syscall then we will 'lose' it, because at the point of
8460      * execve the process leaves QEMU's control. So we use the
8461      * safe syscall wrapper to ensure that we either take the
8462      * signal as a guest signal, or else it does not happen
8463      * before the execve completes and makes it the other
8464      * program's problem.
8465      */
8466     p = lock_user_string(pathname);
8467     if (!p) {
8468         goto execve_efault;
8469     }
8470 
8471     if (is_proc_myself(p, "exe")) {
8472         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8473     } else {
8474         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8475     }
8476 
8477     unlock_user(p, pathname, 0);
8478 
8479     goto execve_end;
8480 
8481 execve_efault:
8482     ret = -TARGET_EFAULT;
8483 
8484 execve_end:
8485     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8486         if (get_user_ual(addr, gp) || !addr) {
8487             break;
8488         }
8489         unlock_user(*q, addr, 0);
8490     }
8491     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8492         if (get_user_ual(addr, gp) || !addr) {
8493             break;
8494         }
8495         unlock_user(*q, addr, 0);
8496     }
8497 
8498     g_free(argp);
8499     g_free(envp);
8500     return ret;
8501 }
8502 
8503 #define TIMER_MAGIC 0x0caf0000
8504 #define TIMER_MAGIC_MASK 0xffff0000
8505 
8506 /* Convert QEMU provided timer ID back to internal 16bit index format */
8507 static target_timer_t get_timer_id(abi_long arg)
8508 {
8509     target_timer_t timerid = arg;
8510 
8511     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8512         return -TARGET_EINVAL;
8513     }
8514 
8515     timerid &= 0xffff;
8516 
8517     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8518         return -TARGET_EINVAL;
8519     }
8520 
8521     return timerid;
8522 }
8523 
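/*
 * Copy a guest CPU affinity bitmap into a host cpu mask bit by bit,
 * coping with differing word sizes between guest abi_ulong and host
 * unsigned long.
 */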
8524 static int target_to_host_cpu_mask(unsigned long *host_mask,
8525                                    size_t host_size,
8526                                    abi_ulong target_addr,
8527                                    size_t target_size)
8528 {
8529     unsigned target_bits = sizeof(abi_ulong) * 8;
8530     unsigned host_bits = sizeof(*host_mask) * 8;
8531     abi_ulong *target_mask;
8532     unsigned i, j;
8533 
8534     assert(host_size >= target_size);
8535 
8536     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8537     if (!target_mask) {
8538         return -TARGET_EFAULT;
8539     }
8540     memset(host_mask, 0, host_size);
8541 
8542     for (i = 0; i < target_size / sizeof(abi_ulong); i++) {
8543         unsigned bit = i * target_bits;
8544         abi_ulong val;
8545 
8546         __get_user(val, &target_mask[i]);
8547         for (j = 0; j < target_bits; j++, bit++) {
8548             if (val & (1UL << j)) {
8549                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8550             }
8551         }
8552     }
8553 
8554     unlock_user(target_mask, target_addr, 0);
8555     return 0;
8556 }
8557 
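/* Inverse of target_to_host_cpu_mask(): copy a host CPU affinity
 * bitmap back out to guest memory. */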
8558 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8559                                    size_t host_size,
8560                                    abi_ulong target_addr,
8561                                    size_t target_size)
8562 {
8563     unsigned target_bits = sizeof(abi_ulong) * 8;
8564     unsigned host_bits = sizeof(*host_mask) * 8;
8565     abi_ulong *target_mask;
8566     unsigned i, j;
8567 
8568     assert(host_size >= target_size);
8569 
8570     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8571     if (!target_mask) {
8572         return -TARGET_EFAULT;
8573     }
8574 
8575     for (i = 0; i < target_size / sizeof(abi_ulong); i++) {
8576         unsigned bit = i * target_bits;
8577         abi_ulong val = 0;
8578 
8579         for (j = 0; j < target_bits; j++, bit++) {
8580             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8581                 val |= 1UL << j;
8582             }
8583         }
8584         __put_user(val, &target_mask[i]);
8585     }
8586 
8587     unlock_user(target_mask, target_addr, target_size);
8588     return 0;
8589 }
8590 
8591 #ifdef TARGET_NR_getdents
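/*
 * Implement getdents(): read host directory entries into a bounce
 * buffer and repack them one record at a time into the guest dirent
 * layout, rewinding the directory offset if the guest buffer fills up
 * before the host results are exhausted.
 */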
8592 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8593 {
8594     g_autofree void *hdirp = NULL;
8595     void *tdirp;
8596     int hlen, hoff, toff;
8597     int hreclen, treclen;
8598     off64_t prev_diroff = 0;
8599 
8600     hdirp = g_try_malloc(count);
8601     if (!hdirp) {
8602         return -TARGET_ENOMEM;
8603     }
8604 
8605 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8606     hlen = sys_getdents(dirfd, hdirp, count);
8607 #else
8608     hlen = sys_getdents64(dirfd, hdirp, count);
8609 #endif
8610 
8611     hlen = get_errno(hlen);
8612     if (is_error(hlen)) {
8613         return hlen;
8614     }
8615 
8616     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8617     if (!tdirp) {
8618         return -TARGET_EFAULT;
8619     }
8620 
8621     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8622 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8623         struct linux_dirent *hde = hdirp + hoff;
8624 #else
8625         struct linux_dirent64 *hde = hdirp + hoff;
8626 #endif
8627         struct target_dirent *tde = tdirp + toff;
8628         int namelen;
8629         uint8_t type;
8630 
8631         namelen = strlen(hde->d_name);
8632         hreclen = hde->d_reclen;
8633         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8634         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8635 
8636         if (toff + treclen > count) {
8637             /*
8638              * If the host struct is smaller than the target struct, or
8639              * requires less alignment and thus packs into less space,
8640              * then the host can return more entries than we can pass
8641              * on to the guest.
8642              */
8643             if (toff == 0) {
8644                 toff = -TARGET_EINVAL; /* result buffer is too small */
8645                 break;
8646             }
8647             /*
8648              * Return what we have, resetting the file pointer to the
8649              * location of the first record not returned.
8650              */
8651             lseek64(dirfd, prev_diroff, SEEK_SET);
8652             break;
8653         }
8654 
8655         prev_diroff = hde->d_off;
8656         tde->d_ino = tswapal(hde->d_ino);
8657         tde->d_off = tswapal(hde->d_off);
8658         tde->d_reclen = tswap16(treclen);
8659         memcpy(tde->d_name, hde->d_name, namelen + 1);
8660 
8661         /*
8662          * The getdents type is in what was formerly a padding byte at the
8663          * end of the structure.
8664          */
8665 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8666         type = *((uint8_t *)hde + hreclen - 1);
8667 #else
8668         type = hde->d_type;
8669 #endif
8670         *((uint8_t *)tde + treclen - 1) = type;
8671     }
8672 
8673     unlock_user(tdirp, arg2, toff);
8674     return toff;
8675 }
8676 #endif /* TARGET_NR_getdents */
8677 
8678 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
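/* getdents64() counterpart of do_getdents(), repacking host
 * linux_dirent64 records into the guest dirent64 layout. */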
8679 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8680 {
8681     g_autofree void *hdirp = NULL;
8682     void *tdirp;
8683     int hlen, hoff, toff;
8684     int hreclen, treclen;
8685     off64_t prev_diroff = 0;
8686 
8687     hdirp = g_try_malloc(count);
8688     if (!hdirp) {
8689         return -TARGET_ENOMEM;
8690     }
8691 
8692     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8693     if (is_error(hlen)) {
8694         return hlen;
8695     }
8696 
8697     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8698     if (!tdirp) {
8699         return -TARGET_EFAULT;
8700     }
8701 
8702     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8703         struct linux_dirent64 *hde = hdirp + hoff;
8704         struct target_dirent64 *tde = tdirp + toff;
8705         int namelen;
8706 
8707         namelen = strlen(hde->d_name) + 1;
8708         hreclen = hde->d_reclen;
8709         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8710         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8711 
8712         if (toff + treclen > count) {
8713             /*
8714              * If the host struct is smaller than the target struct, or
8715              * requires less alignment and thus packs into less space,
8716              * then the host can return more entries than we can pass
8717              * on to the guest.
8718              */
8719             if (toff == 0) {
8720                 toff = -TARGET_EINVAL; /* result buffer is too small */
8721                 break;
8722             }
8723             /*
8724              * Return what we have, resetting the file pointer to the
8725              * location of the first record not returned.
8726              */
8727             lseek64(dirfd, prev_diroff, SEEK_SET);
8728             break;
8729         }
8730 
8731         prev_diroff = hde->d_off;
8732         tde->d_ino = tswap64(hde->d_ino);
8733         tde->d_off = tswap64(hde->d_off);
8734         tde->d_reclen = tswap16(treclen);
8735         tde->d_type = hde->d_type;
8736         memcpy(tde->d_name, hde->d_name, namelen);
8737     }
8738 
8739     unlock_user(tdirp, arg2, toff);
8740     return toff;
8741 }
8742 #endif /* TARGET_NR_getdents64 */
8743 
8744 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8745 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8746 #endif
8747 
8748 /* This is an internal helper for do_syscall that provides a single
8749  * return point, so that actions such as logging of syscall results
8750  * can be performed in one place.
8751  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8752  */
8753 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8754                             abi_long arg2, abi_long arg3, abi_long arg4,
8755                             abi_long arg5, abi_long arg6, abi_long arg7,
8756                             abi_long arg8)
8757 {
8758     CPUState *cpu = env_cpu(cpu_env);
8759     abi_long ret;
8760 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8761     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8762     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8763     || defined(TARGET_NR_statx)
8764     struct stat st;
8765 #endif
8766 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8767     || defined(TARGET_NR_fstatfs)
8768     struct statfs stfs;
8769 #endif
8770     void *p;
8771 
8772     switch(num) {
8773     case TARGET_NR_exit:
8774         /* In old applications this may be used to implement _exit(2).
8775            However, in threaded applications it is used for thread termination,
8776            and _exit_group is used for application termination.
8777            Do thread termination if we have more than one thread.  */
8778 
8779         if (block_signals()) {
8780             return -QEMU_ERESTARTSYS;
8781         }
8782 
8783         pthread_mutex_lock(&clone_lock);
8784 
8785         if (CPU_NEXT(first_cpu)) {
8786             TaskState *ts = cpu->opaque;
8787 
8788             if (ts->child_tidptr) {
8789                 put_user_u32(0, ts->child_tidptr);
8790                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8791                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8792             }
8793 
8794             object_unparent(OBJECT(cpu));
8795             object_unref(OBJECT(cpu));
8796             /*
8797              * At this point the CPU should be unrealized and removed
8798              * from cpu lists. We can clean-up the rest of the thread
8799              * data without the lock held.
8800              */
8801 
8802             pthread_mutex_unlock(&clone_lock);
8803 
8804             thread_cpu = NULL;
8805             g_free(ts);
8806             rcu_unregister_thread();
8807             pthread_exit(NULL);
8808         }
8809 
8810         pthread_mutex_unlock(&clone_lock);
8811         preexit_cleanup(cpu_env, arg1);
8812         _exit(arg1);
8813         return 0; /* avoid warning */
8814     case TARGET_NR_read:
8815         if (arg2 == 0 && arg3 == 0) {
8816             return get_errno(safe_read(arg1, 0, 0));
8817         } else {
8818             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8819                 return -TARGET_EFAULT;
8820             ret = get_errno(safe_read(arg1, p, arg3));
8821             if (ret >= 0 &&
8822                 fd_trans_host_to_target_data(arg1)) {
8823                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8824             }
8825             unlock_user(p, arg2, ret);
8826         }
8827         return ret;
8828     case TARGET_NR_write:
8829         if (arg2 == 0 && arg3 == 0) {
8830             return get_errno(safe_write(arg1, 0, 0));
8831         }
8832         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8833             return -TARGET_EFAULT;
8834         if (fd_trans_target_to_host_data(arg1)) {
8835             void *copy = g_malloc(arg3);
8836             memcpy(copy, p, arg3);
8837             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8838             if (ret >= 0) {
8839                 ret = get_errno(safe_write(arg1, copy, ret));
8840             }
8841             g_free(copy);
8842         } else {
8843             ret = get_errno(safe_write(arg1, p, arg3));
8844         }
8845         unlock_user(p, arg2, 0);
8846         return ret;
8847 
8848 #ifdef TARGET_NR_open
8849     case TARGET_NR_open:
8850         if (!(p = lock_user_string(arg1)))
8851             return -TARGET_EFAULT;
8852         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8853                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8854                                   arg3));
8855         fd_trans_unregister(ret);
8856         unlock_user(p, arg1, 0);
8857         return ret;
8858 #endif
8859     case TARGET_NR_openat:
8860         if (!(p = lock_user_string(arg2)))
8861             return -TARGET_EFAULT;
8862         ret = get_errno(do_openat(cpu_env, arg1, p,
8863                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8864                                   arg4));
8865         fd_trans_unregister(ret);
8866         unlock_user(p, arg2, 0);
8867         return ret;
8868 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8869     case TARGET_NR_name_to_handle_at:
8870         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8871         return ret;
8872 #endif
8873 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8874     case TARGET_NR_open_by_handle_at:
8875         ret = do_open_by_handle_at(arg1, arg2, arg3);
8876         fd_trans_unregister(ret);
8877         return ret;
8878 #endif
8879 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8880     case TARGET_NR_pidfd_open:
8881         return get_errno(pidfd_open(arg1, arg2));
8882 #endif
8883 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8884     case TARGET_NR_pidfd_send_signal:
8885         {
8886             siginfo_t uinfo, *puinfo;
8887 
8888             if (arg3) {
8889                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8890                 if (!p) {
8891                     return -TARGET_EFAULT;
8892                 }
8893                 target_to_host_siginfo(&uinfo, p);
8894                 unlock_user(p, arg3, 0);
8895                 puinfo = &uinfo;
8896             } else {
8897                 puinfo = NULL;
8898             }
8899             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8900                                               puinfo, arg4));
8901         }
8902         return ret;
8903 #endif
8904 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8905     case TARGET_NR_pidfd_getfd:
8906         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8907 #endif
8908     case TARGET_NR_close:
8909         fd_trans_unregister(arg1);
8910         return get_errno(close(arg1));
8911 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8912     case TARGET_NR_close_range:
8913         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8914         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8915             abi_long fd, maxfd;
8916             maxfd = MIN(arg2, target_fd_max);
8917             for (fd = arg1; fd < maxfd; fd++) {
8918                 fd_trans_unregister(fd);
8919             }
8920         }
8921         return ret;
8922 #endif
8923 
8924     case TARGET_NR_brk:
8925         return do_brk(arg1);
8926 #ifdef TARGET_NR_fork
8927     case TARGET_NR_fork:
8928         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8929 #endif
8930 #ifdef TARGET_NR_waitpid
8931     case TARGET_NR_waitpid:
8932         {
8933             int status;
8934             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8935             if (!is_error(ret) && arg2 && ret
8936                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8937                 return -TARGET_EFAULT;
8938         }
8939         return ret;
8940 #endif
8941 #ifdef TARGET_NR_waitid
8942     case TARGET_NR_waitid:
8943         {
8944             siginfo_t info;
8945             info.si_pid = 0;
8946             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8947             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8948                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8949                     return -TARGET_EFAULT;
8950                 host_to_target_siginfo(p, &info);
8951                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8952             }
8953         }
8954         return ret;
8955 #endif
8956 #ifdef TARGET_NR_creat /* not on alpha */
8957     case TARGET_NR_creat:
8958         if (!(p = lock_user_string(arg1)))
8959             return -TARGET_EFAULT;
8960         ret = get_errno(creat(p, arg2));
8961         fd_trans_unregister(ret);
8962         unlock_user(p, arg1, 0);
8963         return ret;
8964 #endif
8965 #ifdef TARGET_NR_link
8966     case TARGET_NR_link:
8967         {
8968             void * p2;
8969             p = lock_user_string(arg1);
8970             p2 = lock_user_string(arg2);
8971             if (!p || !p2)
8972                 ret = -TARGET_EFAULT;
8973             else
8974                 ret = get_errno(link(p, p2));
8975             unlock_user(p2, arg2, 0);
8976             unlock_user(p, arg1, 0);
8977         }
8978         return ret;
8979 #endif
8980 #if defined(TARGET_NR_linkat)
8981     case TARGET_NR_linkat:
8982         {
8983             void * p2 = NULL;
8984             if (!arg2 || !arg4)
8985                 return -TARGET_EFAULT;
8986             p  = lock_user_string(arg2);
8987             p2 = lock_user_string(arg4);
8988             if (!p || !p2)
8989                 ret = -TARGET_EFAULT;
8990             else
8991                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8992             unlock_user(p, arg2, 0);
8993             unlock_user(p2, arg4, 0);
8994         }
8995         return ret;
8996 #endif
8997 #ifdef TARGET_NR_unlink
8998     case TARGET_NR_unlink:
8999         if (!(p = lock_user_string(arg1)))
9000             return -TARGET_EFAULT;
9001         ret = get_errno(unlink(p));
9002         unlock_user(p, arg1, 0);
9003         return ret;
9004 #endif
9005 #if defined(TARGET_NR_unlinkat)
9006     case TARGET_NR_unlinkat:
9007         if (!(p = lock_user_string(arg2)))
9008             return -TARGET_EFAULT;
9009         ret = get_errno(unlinkat(arg1, p, arg3));
9010         unlock_user(p, arg2, 0);
9011         return ret;
9012 #endif
9013     case TARGET_NR_execveat:
9014         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
9015     case TARGET_NR_execve:
9016         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
9017     case TARGET_NR_chdir:
9018         if (!(p = lock_user_string(arg1)))
9019             return -TARGET_EFAULT;
9020         ret = get_errno(chdir(p));
9021         unlock_user(p, arg1, 0);
9022         return ret;
9023 #ifdef TARGET_NR_time
9024     case TARGET_NR_time:
9025         {
9026             time_t host_time;
9027             ret = get_errno(time(&host_time));
9028             if (!is_error(ret)
9029                 && arg1
9030                 && put_user_sal(host_time, arg1))
9031                 return -TARGET_EFAULT;
9032         }
9033         return ret;
9034 #endif
9035 #ifdef TARGET_NR_mknod
9036     case TARGET_NR_mknod:
9037         if (!(p = lock_user_string(arg1)))
9038             return -TARGET_EFAULT;
9039         ret = get_errno(mknod(p, arg2, arg3));
9040         unlock_user(p, arg1, 0);
9041         return ret;
9042 #endif
9043 #if defined(TARGET_NR_mknodat)
9044     case TARGET_NR_mknodat:
9045         if (!(p = lock_user_string(arg2)))
9046             return -TARGET_EFAULT;
9047         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9048         unlock_user(p, arg2, 0);
9049         return ret;
9050 #endif
9051 #ifdef TARGET_NR_chmod
9052     case TARGET_NR_chmod:
9053         if (!(p = lock_user_string(arg1)))
9054             return -TARGET_EFAULT;
9055         ret = get_errno(chmod(p, arg2));
9056         unlock_user(p, arg1, 0);
9057         return ret;
9058 #endif
9059 #ifdef TARGET_NR_lseek
9060     case TARGET_NR_lseek:
9061         return get_errno(lseek(arg1, arg2, arg3));
9062 #endif
9063 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9064     /* Alpha specific */
9065     case TARGET_NR_getxpid:
9066         cpu_env->ir[IR_A4] = getppid();
9067         return get_errno(getpid());
9068 #endif
9069 #ifdef TARGET_NR_getpid
9070     case TARGET_NR_getpid:
9071         return get_errno(getpid());
9072 #endif
9073     case TARGET_NR_mount:
9074         {
9075             /* need to look at the data field */
9076             void *p2, *p3;
9077 
9078             if (arg1) {
9079                 p = lock_user_string(arg1);
9080                 if (!p) {
9081                     return -TARGET_EFAULT;
9082                 }
9083             } else {
9084                 p = NULL;
9085             }
9086 
9087             p2 = lock_user_string(arg2);
9088             if (!p2) {
9089                 if (arg1) {
9090                     unlock_user(p, arg1, 0);
9091                 }
9092                 return -TARGET_EFAULT;
9093             }
9094 
9095             if (arg3) {
9096                 p3 = lock_user_string(arg3);
9097                 if (!p3) {
9098                     if (arg1) {
9099                         unlock_user(p, arg1, 0);
9100                     }
9101                     unlock_user(p2, arg2, 0);
9102                     return -TARGET_EFAULT;
9103                 }
9104             } else {
9105                 p3 = NULL;
9106             }
9107 
9108             /* FIXME - arg5 should be locked, but it isn't clear how to
9109              * do that since it's not guaranteed to be a NULL-terminated
9110              * string.
9111              */
9112             if (!arg5) {
9113                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9114             } else {
9115                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9116             }
9117             ret = get_errno(ret);
9118 
9119             if (arg1) {
9120                 unlock_user(p, arg1, 0);
9121             }
9122             unlock_user(p2, arg2, 0);
9123             if (arg3) {
9124                 unlock_user(p3, arg3, 0);
9125             }
9126         }
9127         return ret;
9128 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9129 #if defined(TARGET_NR_umount)
9130     case TARGET_NR_umount:
9131 #endif
9132 #if defined(TARGET_NR_oldumount)
9133     case TARGET_NR_oldumount:
9134 #endif
9135         if (!(p = lock_user_string(arg1)))
9136             return -TARGET_EFAULT;
9137         ret = get_errno(umount(p));
9138         unlock_user(p, arg1, 0);
9139         return ret;
9140 #endif
9141 #ifdef TARGET_NR_stime /* not on alpha */
9142     case TARGET_NR_stime:
9143         {
9144             struct timespec ts;
9145             ts.tv_nsec = 0;
9146             if (get_user_sal(ts.tv_sec, arg1)) {
9147                 return -TARGET_EFAULT;
9148             }
9149             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9150         }
9151 #endif
9152 #ifdef TARGET_NR_alarm /* not on alpha */
9153     case TARGET_NR_alarm:
9154         return alarm(arg1);
9155 #endif
9156 #ifdef TARGET_NR_pause /* not on alpha */
9157     case TARGET_NR_pause:
9158         if (!block_signals()) {
9159             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9160         }
9161         return -TARGET_EINTR;
9162 #endif
9163 #ifdef TARGET_NR_utime
9164     case TARGET_NR_utime:
9165         {
9166             struct utimbuf tbuf, *host_tbuf;
9167             struct target_utimbuf *target_tbuf;
9168             if (arg2) {
9169                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9170                     return -TARGET_EFAULT;
9171                 tbuf.actime = tswapal(target_tbuf->actime);
9172                 tbuf.modtime = tswapal(target_tbuf->modtime);
9173                 unlock_user_struct(target_tbuf, arg2, 0);
9174                 host_tbuf = &tbuf;
9175             } else {
9176                 host_tbuf = NULL;
9177             }
9178             if (!(p = lock_user_string(arg1)))
9179                 return -TARGET_EFAULT;
9180             ret = get_errno(utime(p, host_tbuf));
9181             unlock_user(p, arg1, 0);
9182         }
9183         return ret;
9184 #endif
9185 #ifdef TARGET_NR_utimes
9186     case TARGET_NR_utimes:
9187         {
9188             struct timeval *tvp, tv[2];
9189             if (arg2) {
9190                 if (copy_from_user_timeval(&tv[0], arg2)
9191                     || copy_from_user_timeval(&tv[1],
9192                                               arg2 + sizeof(struct target_timeval)))
9193                     return -TARGET_EFAULT;
9194                 tvp = tv;
9195             } else {
9196                 tvp = NULL;
9197             }
9198             if (!(p = lock_user_string(arg1)))
9199                 return -TARGET_EFAULT;
9200             ret = get_errno(utimes(p, tvp));
9201             unlock_user(p, arg1, 0);
9202         }
9203         return ret;
9204 #endif
9205 #if defined(TARGET_NR_futimesat)
9206     case TARGET_NR_futimesat:
9207         {
9208             struct timeval *tvp, tv[2];
9209             if (arg3) {
9210                 if (copy_from_user_timeval(&tv[0], arg3)
9211                     || copy_from_user_timeval(&tv[1],
9212                                               arg3 + sizeof(struct target_timeval)))
9213                     return -TARGET_EFAULT;
9214                 tvp = tv;
9215             } else {
9216                 tvp = NULL;
9217             }
9218             if (!(p = lock_user_string(arg2))) {
9219                 return -TARGET_EFAULT;
9220             }
9221             ret = get_errno(futimesat(arg1, path(p), tvp));
9222             unlock_user(p, arg2, 0);
9223         }
9224         return ret;
9225 #endif
9226 #ifdef TARGET_NR_access
9227     case TARGET_NR_access:
9228         if (!(p = lock_user_string(arg1))) {
9229             return -TARGET_EFAULT;
9230         }
9231         ret = get_errno(access(path(p), arg2));
9232         unlock_user(p, arg1, 0);
9233         return ret;
9234 #endif
9235 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9236     case TARGET_NR_faccessat:
9237         if (!(p = lock_user_string(arg2))) {
9238             return -TARGET_EFAULT;
9239         }
9240         ret = get_errno(faccessat(arg1, p, arg3, 0));
9241         unlock_user(p, arg2, 0);
9242         return ret;
9243 #endif
9244 #if defined(TARGET_NR_faccessat2)
9245     case TARGET_NR_faccessat2:
9246         if (!(p = lock_user_string(arg2))) {
9247             return -TARGET_EFAULT;
9248         }
9249         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9250         unlock_user(p, arg2, 0);
9251         return ret;
9252 #endif
9253 #ifdef TARGET_NR_nice /* not on alpha */
9254     case TARGET_NR_nice:
9255         return get_errno(nice(arg1));
9256 #endif
9257     case TARGET_NR_sync:
9258         sync();
9259         return 0;
9260 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9261     case TARGET_NR_syncfs:
9262         return get_errno(syncfs(arg1));
9263 #endif
9264     case TARGET_NR_kill:
9265         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9266 #ifdef TARGET_NR_rename
9267     case TARGET_NR_rename:
9268         {
9269             void *p2;
9270             p = lock_user_string(arg1);
9271             p2 = lock_user_string(arg2);
9272             if (!p || !p2)
9273                 ret = -TARGET_EFAULT;
9274             else
9275                 ret = get_errno(rename(p, p2));
9276             unlock_user(p2, arg2, 0);
9277             unlock_user(p, arg1, 0);
9278         }
9279         return ret;
9280 #endif
9281 #if defined(TARGET_NR_renameat)
9282     case TARGET_NR_renameat:
9283         {
9284             void *p2;
9285             p  = lock_user_string(arg2);
9286             p2 = lock_user_string(arg4);
9287             if (!p || !p2)
9288                 ret = -TARGET_EFAULT;
9289             else
9290                 ret = get_errno(renameat(arg1, p, arg3, p2));
9291             unlock_user(p2, arg4, 0);
9292             unlock_user(p, arg2, 0);
9293         }
9294         return ret;
9295 #endif
9296 #if defined(TARGET_NR_renameat2)
9297     case TARGET_NR_renameat2:
9298         {
9299             void *p2;
9300             p  = lock_user_string(arg2);
9301             p2 = lock_user_string(arg4);
9302             if (!p || !p2) {
9303                 ret = -TARGET_EFAULT;
9304             } else {
9305                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9306             }
9307             unlock_user(p2, arg4, 0);
9308             unlock_user(p, arg2, 0);
9309         }
9310         return ret;
9311 #endif
9312 #ifdef TARGET_NR_mkdir
9313     case TARGET_NR_mkdir:
9314         if (!(p = lock_user_string(arg1)))
9315             return -TARGET_EFAULT;
9316         ret = get_errno(mkdir(p, arg2));
9317         unlock_user(p, arg1, 0);
9318         return ret;
9319 #endif
9320 #if defined(TARGET_NR_mkdirat)
9321     case TARGET_NR_mkdirat:
9322         if (!(p = lock_user_string(arg2)))
9323             return -TARGET_EFAULT;
9324         ret = get_errno(mkdirat(arg1, p, arg3));
9325         unlock_user(p, arg2, 0);
9326         return ret;
9327 #endif
9328 #ifdef TARGET_NR_rmdir
9329     case TARGET_NR_rmdir:
9330         if (!(p = lock_user_string(arg1)))
9331             return -TARGET_EFAULT;
9332         ret = get_errno(rmdir(p));
9333         unlock_user(p, arg1, 0);
9334         return ret;
9335 #endif
9336     case TARGET_NR_dup:
9337         ret = get_errno(dup(arg1));
9338         if (ret >= 0) {
9339             fd_trans_dup(arg1, ret);
9340         }
9341         return ret;
9342 #ifdef TARGET_NR_pipe
9343     case TARGET_NR_pipe:
9344         return do_pipe(cpu_env, arg1, 0, 0);
9345 #endif
9346 #ifdef TARGET_NR_pipe2
9347     case TARGET_NR_pipe2:
9348         return do_pipe(cpu_env, arg1,
9349                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9350 #endif
9351     case TARGET_NR_times:
9352         {
9353             struct target_tms *tmsp;
9354             struct tms tms;
9355             ret = get_errno(times(&tms));
9356             if (arg1) {
9357                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9358                 if (!tmsp)
9359                     return -TARGET_EFAULT;
9360                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9361                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9362                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9363                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9364             }
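            /*
             * times() itself also returns a clock_t (elapsed ticks), so the
             * raw return value needs the same clock_t conversion as the
             * structure fields above.
             */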
9365             if (!is_error(ret))
9366                 ret = host_to_target_clock_t(ret);
9367         }
9368         return ret;
9369     case TARGET_NR_acct:
9370         if (arg1 == 0) {
9371             ret = get_errno(acct(NULL));
9372         } else {
9373             if (!(p = lock_user_string(arg1))) {
9374                 return -TARGET_EFAULT;
9375             }
9376             ret = get_errno(acct(path(p)));
9377             unlock_user(p, arg1, 0);
9378         }
9379         return ret;
9380 #ifdef TARGET_NR_umount2
9381     case TARGET_NR_umount2:
9382         if (!(p = lock_user_string(arg1)))
9383             return -TARGET_EFAULT;
9384         ret = get_errno(umount2(p, arg2));
9385         unlock_user(p, arg1, 0);
9386         return ret;
9387 #endif
9388     case TARGET_NR_ioctl:
9389         return do_ioctl(arg1, arg2, arg3);
9390 #ifdef TARGET_NR_fcntl
9391     case TARGET_NR_fcntl:
9392         return do_fcntl(arg1, arg2, arg3);
9393 #endif
9394     case TARGET_NR_setpgid:
9395         return get_errno(setpgid(arg1, arg2));
9396     case TARGET_NR_umask:
9397         return get_errno(umask(arg1));
9398     case TARGET_NR_chroot:
9399         if (!(p = lock_user_string(arg1)))
9400             return -TARGET_EFAULT;
9401         ret = get_errno(chroot(p));
9402         unlock_user(p, arg1, 0);
9403         return ret;
9404 #ifdef TARGET_NR_dup2
9405     case TARGET_NR_dup2:
9406         ret = get_errno(dup2(arg1, arg2));
9407         if (ret >= 0) {
9408             fd_trans_dup(arg1, arg2);
9409         }
9410         return ret;
9411 #endif
9412 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9413     case TARGET_NR_dup3:
9414     {
9415         int host_flags;
9416 
9417         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9418             return -TARGET_EINVAL;
9419         }
9420         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9421         ret = get_errno(dup3(arg1, arg2, host_flags));
9422         if (ret >= 0) {
9423             fd_trans_dup(arg1, arg2);
9424         }
9425         return ret;
9426     }
9427 #endif
9428 #ifdef TARGET_NR_getppid /* not on alpha */
9429     case TARGET_NR_getppid:
9430         return get_errno(getppid());
9431 #endif
9432 #ifdef TARGET_NR_getpgrp
9433     case TARGET_NR_getpgrp:
9434         return get_errno(getpgrp());
9435 #endif
9436     case TARGET_NR_setsid:
9437         return get_errno(setsid());
9438 #ifdef TARGET_NR_sigaction
9439     case TARGET_NR_sigaction:
9440         {
9441 #if defined(TARGET_MIPS)
9442             struct target_sigaction act, oact, *pact, *old_act;
9443 
9444             if (arg2) {
9445                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9446                     return -TARGET_EFAULT;
9447                 act._sa_handler = old_act->_sa_handler;
9448                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9449                 act.sa_flags = old_act->sa_flags;
9450                 unlock_user_struct(old_act, arg2, 0);
9451                 pact = &act;
9452             } else {
9453                 pact = NULL;
9454             }
9455 
9456             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9457 
9458             if (!is_error(ret) && arg3) {
9459                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9460                     return -TARGET_EFAULT;
9461                 old_act->_sa_handler = oact._sa_handler;
9462                 old_act->sa_flags = oact.sa_flags;
9463                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9464                 old_act->sa_mask.sig[1] = 0;
9465                 old_act->sa_mask.sig[2] = 0;
9466                 old_act->sa_mask.sig[3] = 0;
9467                 unlock_user_struct(old_act, arg3, 1);
9468             }
9469 #else
9470             struct target_old_sigaction *old_act;
9471             struct target_sigaction act, oact, *pact;
9472             if (arg2) {
9473                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9474                     return -TARGET_EFAULT;
9475                 act._sa_handler = old_act->_sa_handler;
9476                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9477                 act.sa_flags = old_act->sa_flags;
9478 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9479                 act.sa_restorer = old_act->sa_restorer;
9480 #endif
9481                 unlock_user_struct(old_act, arg2, 0);
9482                 pact = &act;
9483             } else {
9484                 pact = NULL;
9485             }
9486             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9487             if (!is_error(ret) && arg3) {
9488                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9489                     return -TARGET_EFAULT;
9490                 old_act->_sa_handler = oact._sa_handler;
9491                 old_act->sa_mask = oact.sa_mask.sig[0];
9492                 old_act->sa_flags = oact.sa_flags;
9493 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9494                 old_act->sa_restorer = oact.sa_restorer;
9495 #endif
9496                 unlock_user_struct(old_act, arg3, 1);
9497             }
9498 #endif
9499         }
9500         return ret;
9501 #endif
9502     case TARGET_NR_rt_sigaction:
9503         {
9504             /*
9505              * For Alpha and SPARC this is a 5 argument syscall, with
9506              * a 'restorer' parameter which must be copied into the
9507              * sa_restorer field of the sigaction struct.
9508              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9509              * and arg5 is the sigsetsize.
9510              */
9511 #if defined(TARGET_ALPHA)
9512             target_ulong sigsetsize = arg4;
9513             target_ulong restorer = arg5;
9514 #elif defined(TARGET_SPARC)
9515             target_ulong restorer = arg4;
9516             target_ulong sigsetsize = arg5;
9517 #else
9518             target_ulong sigsetsize = arg4;
9519             target_ulong restorer = 0;
9520 #endif
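            /*
             * Illustrative guest-side call shapes implied by the above
             * (a sketch, not code that is built here):
             *   most targets: rt_sigaction(sig, act, oact, sizeof(sigset_t))
             *   Alpha:        rt_sigaction(sig, act, oact, sizeof(sigset_t), restorer)
             *   SPARC:        rt_sigaction(sig, act, oact, restorer, sizeof(sigset_t))
             */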
9521             struct target_sigaction *act = NULL;
9522             struct target_sigaction *oact = NULL;
9523 
9524             if (sigsetsize != sizeof(target_sigset_t)) {
9525                 return -TARGET_EINVAL;
9526             }
9527             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9528                 return -TARGET_EFAULT;
9529             }
9530             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9531                 ret = -TARGET_EFAULT;
9532             } else {
9533                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9534                 if (oact) {
9535                     unlock_user_struct(oact, arg3, 1);
9536                 }
9537             }
9538             if (act) {
9539                 unlock_user_struct(act, arg2, 0);
9540             }
9541         }
9542         return ret;
9543 #ifdef TARGET_NR_sgetmask /* not on alpha */
9544     case TARGET_NR_sgetmask:
9545         {
9546             sigset_t cur_set;
9547             abi_ulong target_set;
9548             ret = do_sigprocmask(0, NULL, &cur_set);
9549             if (!ret) {
9550                 host_to_target_old_sigset(&target_set, &cur_set);
9551                 ret = target_set;
9552             }
9553         }
9554         return ret;
9555 #endif
9556 #ifdef TARGET_NR_ssetmask /* not on alpha */
9557     case TARGET_NR_ssetmask:
9558         {
9559             sigset_t set, oset;
9560             abi_ulong target_set = arg1;
9561             target_to_host_old_sigset(&set, &target_set);
9562             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9563             if (!ret) {
9564                 host_to_target_old_sigset(&target_set, &oset);
9565                 ret = target_set;
9566             }
9567         }
9568         return ret;
9569 #endif
9570 #ifdef TARGET_NR_sigprocmask
9571     case TARGET_NR_sigprocmask:
9572         {
9573 #if defined(TARGET_ALPHA)
9574             sigset_t set, oldset;
9575             abi_ulong mask;
9576             int how;
9577 
9578             switch (arg1) {
9579             case TARGET_SIG_BLOCK:
9580                 how = SIG_BLOCK;
9581                 break;
9582             case TARGET_SIG_UNBLOCK:
9583                 how = SIG_UNBLOCK;
9584                 break;
9585             case TARGET_SIG_SETMASK:
9586                 how = SIG_SETMASK;
9587                 break;
9588             default:
9589                 return -TARGET_EINVAL;
9590             }
9591             mask = arg2;
9592             target_to_host_old_sigset(&set, &mask);
9593 
9594             ret = do_sigprocmask(how, &set, &oldset);
9595             if (!is_error(ret)) {
9596                 host_to_target_old_sigset(&mask, &oldset);
9597                 ret = mask;
9598                 cpu_env->ir[IR_V0] = 0; /* force no error */
9599             }
9600 #else
9601             sigset_t set, oldset, *set_ptr;
9602             int how;
9603 
9604             if (arg2) {
9605                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9606                 if (!p) {
9607                     return -TARGET_EFAULT;
9608                 }
9609                 target_to_host_old_sigset(&set, p);
9610                 unlock_user(p, arg2, 0);
9611                 set_ptr = &set;
9612                 switch (arg1) {
9613                 case TARGET_SIG_BLOCK:
9614                     how = SIG_BLOCK;
9615                     break;
9616                 case TARGET_SIG_UNBLOCK:
9617                     how = SIG_UNBLOCK;
9618                     break;
9619                 case TARGET_SIG_SETMASK:
9620                     how = SIG_SETMASK;
9621                     break;
9622                 default:
9623                     return -TARGET_EINVAL;
9624                 }
9625             } else {
9626                 how = 0;
9627                 set_ptr = NULL;
9628             }
9629             ret = do_sigprocmask(how, set_ptr, &oldset);
9630             if (!is_error(ret) && arg3) {
9631                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9632                     return -TARGET_EFAULT;
9633                 host_to_target_old_sigset(p, &oldset);
9634                 unlock_user(p, arg3, sizeof(target_sigset_t));
9635             }
9636 #endif
9637         }
9638         return ret;
9639 #endif
9640     case TARGET_NR_rt_sigprocmask:
9641         {
9642             int how = arg1;
9643             sigset_t set, oldset, *set_ptr;
9644 
9645             if (arg4 != sizeof(target_sigset_t)) {
9646                 return -TARGET_EINVAL;
9647             }
9648 
9649             if (arg2) {
9650                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9651                 if (!p) {
9652                     return -TARGET_EFAULT;
9653                 }
9654                 target_to_host_sigset(&set, p);
9655                 unlock_user(p, arg2, 0);
9656                 set_ptr = &set;
9657                 switch(how) {
9658                 case TARGET_SIG_BLOCK:
9659                     how = SIG_BLOCK;
9660                     break;
9661                 case TARGET_SIG_UNBLOCK:
9662                     how = SIG_UNBLOCK;
9663                     break;
9664                 case TARGET_SIG_SETMASK:
9665                     how = SIG_SETMASK;
9666                     break;
9667                 default:
9668                     return -TARGET_EINVAL;
9669                 }
9670             } else {
9671                 how = 0;
9672                 set_ptr = NULL;
9673             }
9674             ret = do_sigprocmask(how, set_ptr, &oldset);
9675             if (!is_error(ret) && arg3) {
9676                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9677                     return -TARGET_EFAULT;
9678                 host_to_target_sigset(p, &oldset);
9679                 unlock_user(p, arg3, sizeof(target_sigset_t));
9680             }
9681         }
9682         return ret;
9683 #ifdef TARGET_NR_sigpending
9684     case TARGET_NR_sigpending:
9685         {
9686             sigset_t set;
9687             ret = get_errno(sigpending(&set));
9688             if (!is_error(ret)) {
9689                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9690                     return -TARGET_EFAULT;
9691                 host_to_target_old_sigset(p, &set);
9692                 unlock_user(p, arg1, sizeof(target_sigset_t));
9693             }
9694         }
9695         return ret;
9696 #endif
9697     case TARGET_NR_rt_sigpending:
9698         {
9699             sigset_t set;
9700 
9701             /* Yes, this check is >, not != like most. We follow the kernel's
9702              * logic and it does it like this because it implements
9703              * NR_sigpending through the same code path, and in that case
9704              * the old_sigset_t is smaller in size.
9705              */
9706             if (arg2 > sizeof(target_sigset_t)) {
9707                 return -TARGET_EINVAL;
9708             }
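            /*
             * In particular, a guest passing the smaller old_sigset_t size is
             * accepted here; only sizes larger than sigset_t are rejected.
             */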
9709 
9710             ret = get_errno(sigpending(&set));
9711             if (!is_error(ret)) {
9712                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9713                     return -TARGET_EFAULT;
9714                 host_to_target_sigset(p, &set);
9715                 unlock_user(p, arg1, sizeof(target_sigset_t));
9716             }
9717         }
9718         return ret;
9719 #ifdef TARGET_NR_sigsuspend
9720     case TARGET_NR_sigsuspend:
9721         {
9722             sigset_t *set;
9723 
9724 #if defined(TARGET_ALPHA)
9725             TaskState *ts = cpu->opaque;
9726             /* target_to_host_old_sigset will bswap back */
9727             abi_ulong mask = tswapal(arg1);
9728             set = &ts->sigsuspend_mask;
9729             target_to_host_old_sigset(set, &mask);
9730 #else
9731             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9732             if (ret != 0) {
9733                 return ret;
9734             }
9735 #endif
9736             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9737             finish_sigsuspend_mask(ret);
9738         }
9739         return ret;
9740 #endif
9741     case TARGET_NR_rt_sigsuspend:
9742         {
9743             sigset_t *set;
9744 
9745             ret = process_sigsuspend_mask(&set, arg1, arg2);
9746             if (ret != 0) {
9747                 return ret;
9748             }
9749             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9750             finish_sigsuspend_mask(ret);
9751         }
9752         return ret;
9753 #ifdef TARGET_NR_rt_sigtimedwait
9754     case TARGET_NR_rt_sigtimedwait:
9755         {
9756             sigset_t set;
9757             struct timespec uts, *puts;
9758             siginfo_t uinfo;
9759 
9760             if (arg4 != sizeof(target_sigset_t)) {
9761                 return -TARGET_EINVAL;
9762             }
9763 
9764             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9765                 return -TARGET_EFAULT;
9766             target_to_host_sigset(&set, p);
9767             unlock_user(p, arg1, 0);
9768             if (arg3) {
9769                 puts = &uts;
9770                 if (target_to_host_timespec(puts, arg3)) {
9771                     return -TARGET_EFAULT;
9772                 }
9773             } else {
9774                 puts = NULL;
9775             }
9776             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9777                                                  SIGSET_T_SIZE));
9778             if (!is_error(ret)) {
9779                 if (arg2) {
9780                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9781                                   0);
9782                     if (!p) {
9783                         return -TARGET_EFAULT;
9784                     }
9785                     host_to_target_siginfo(p, &uinfo);
9786                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9787                 }
9788                 ret = host_to_target_signal(ret);
9789             }
9790         }
9791         return ret;
9792 #endif
9793 #ifdef TARGET_NR_rt_sigtimedwait_time64
9794     case TARGET_NR_rt_sigtimedwait_time64:
9795         {
9796             sigset_t set;
9797             struct timespec uts, *puts;
9798             siginfo_t uinfo;
9799 
9800             if (arg4 != sizeof(target_sigset_t)) {
9801                 return -TARGET_EINVAL;
9802             }
9803 
9804             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9805             if (!p) {
9806                 return -TARGET_EFAULT;
9807             }
9808             target_to_host_sigset(&set, p);
9809             unlock_user(p, arg1, 0);
9810             if (arg3) {
9811                 puts = &uts;
9812                 if (target_to_host_timespec64(puts, arg3)) {
9813                     return -TARGET_EFAULT;
9814                 }
9815             } else {
9816                 puts = NULL;
9817             }
9818             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9819                                                  SIGSET_T_SIZE));
9820             if (!is_error(ret)) {
9821                 if (arg2) {
9822                     p = lock_user(VERIFY_WRITE, arg2,
9823                                   sizeof(target_siginfo_t), 0);
9824                     if (!p) {
9825                         return -TARGET_EFAULT;
9826                     }
9827                     host_to_target_siginfo(p, &uinfo);
9828                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9829                 }
9830                 ret = host_to_target_signal(ret);
9831             }
9832         }
9833         return ret;
9834 #endif
9835     case TARGET_NR_rt_sigqueueinfo:
9836         {
9837             siginfo_t uinfo;
9838 
9839             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9840             if (!p) {
9841                 return -TARGET_EFAULT;
9842             }
9843             target_to_host_siginfo(&uinfo, p);
9844             unlock_user(p, arg3, 0);
9845             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9846         }
9847         return ret;
9848     case TARGET_NR_rt_tgsigqueueinfo:
9849         {
9850             siginfo_t uinfo;
9851 
9852             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9853             if (!p) {
9854                 return -TARGET_EFAULT;
9855             }
9856             target_to_host_siginfo(&uinfo, p);
9857             unlock_user(p, arg4, 0);
9858             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9859         }
9860         return ret;
9861 #ifdef TARGET_NR_sigreturn
9862     case TARGET_NR_sigreturn:
9863         if (block_signals()) {
9864             return -QEMU_ERESTARTSYS;
9865         }
9866         return do_sigreturn(cpu_env);
9867 #endif
9868     case TARGET_NR_rt_sigreturn:
9869         if (block_signals()) {
9870             return -QEMU_ERESTARTSYS;
9871         }
9872         return do_rt_sigreturn(cpu_env);
9873     case TARGET_NR_sethostname:
9874         if (!(p = lock_user_string(arg1)))
9875             return -TARGET_EFAULT;
9876         ret = get_errno(sethostname(p, arg2));
9877         unlock_user(p, arg1, 0);
9878         return ret;
9879 #ifdef TARGET_NR_setrlimit
9880     case TARGET_NR_setrlimit:
9881         {
9882             int resource = target_to_host_resource(arg1);
9883             struct target_rlimit *target_rlim;
9884             struct rlimit rlim;
9885             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9886                 return -TARGET_EFAULT;
9887             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9888             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9889             unlock_user_struct(target_rlim, arg2, 0);
9890             /*
9891              * If we just passed through resource limit settings for memory then
9892              * they would also apply to QEMU's own allocations, and QEMU will
9893              * crash or hang or die if its allocations fail. Ideally we would
9894              * track the guest allocations in QEMU and apply the limits ourselves.
9895              * For now, just tell the guest the call succeeded but don't actually
9896              * limit anything.
9897              */
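            /*
             * Concretely: a guest setrlimit() on RLIMIT_AS, RLIMIT_DATA or
             * RLIMIT_STACK reports success without changing anything on the
             * host, while every other resource is passed straight through.
             */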
9898             if (resource != RLIMIT_AS &&
9899                 resource != RLIMIT_DATA &&
9900                 resource != RLIMIT_STACK) {
9901                 return get_errno(setrlimit(resource, &rlim));
9902             } else {
9903                 return 0;
9904             }
9905         }
9906 #endif
9907 #ifdef TARGET_NR_getrlimit
9908     case TARGET_NR_getrlimit:
9909         {
9910             int resource = target_to_host_resource(arg1);
9911             struct target_rlimit *target_rlim;
9912             struct rlimit rlim;
9913 
9914             ret = get_errno(getrlimit(resource, &rlim));
9915             if (!is_error(ret)) {
9916                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9917                     return -TARGET_EFAULT;
9918                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9919                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9920                 unlock_user_struct(target_rlim, arg2, 1);
9921             }
9922         }
9923         return ret;
9924 #endif
9925     case TARGET_NR_getrusage:
9926         {
9927             struct rusage rusage;
9928             ret = get_errno(getrusage(arg1, &rusage));
9929             if (!is_error(ret)) {
9930                 ret = host_to_target_rusage(arg2, &rusage);
9931             }
9932         }
9933         return ret;
9934 #if defined(TARGET_NR_gettimeofday)
9935     case TARGET_NR_gettimeofday:
9936         {
9937             struct timeval tv;
9938             struct timezone tz;
9939 
9940             ret = get_errno(gettimeofday(&tv, &tz));
9941             if (!is_error(ret)) {
9942                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9943                     return -TARGET_EFAULT;
9944                 }
9945                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9946                     return -TARGET_EFAULT;
9947                 }
9948             }
9949         }
9950         return ret;
9951 #endif
9952 #if defined(TARGET_NR_settimeofday)
9953     case TARGET_NR_settimeofday:
9954         {
9955             struct timeval tv, *ptv = NULL;
9956             struct timezone tz, *ptz = NULL;
9957 
9958             if (arg1) {
9959                 if (copy_from_user_timeval(&tv, arg1)) {
9960                     return -TARGET_EFAULT;
9961                 }
9962                 ptv = &tv;
9963             }
9964 
9965             if (arg2) {
9966                 if (copy_from_user_timezone(&tz, arg2)) {
9967                     return -TARGET_EFAULT;
9968                 }
9969                 ptz = &tz;
9970             }
9971 
9972             return get_errno(settimeofday(ptv, ptz));
9973         }
9974 #endif
9975 #if defined(TARGET_NR_select)
9976     case TARGET_NR_select:
9977 #if defined(TARGET_WANT_NI_OLD_SELECT)
9978         /* some architectures used to have old_select here
9979          * but now return ENOSYS for it.
9980          */
9981         ret = -TARGET_ENOSYS;
9982 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9983         ret = do_old_select(arg1);
9984 #else
9985         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9986 #endif
9987         return ret;
9988 #endif
9989 #ifdef TARGET_NR_pselect6
9990     case TARGET_NR_pselect6:
9991         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9992 #endif
9993 #ifdef TARGET_NR_pselect6_time64
9994     case TARGET_NR_pselect6_time64:
9995         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9996 #endif
9997 #ifdef TARGET_NR_symlink
9998     case TARGET_NR_symlink:
9999         {
10000             void *p2;
10001             p = lock_user_string(arg1);
10002             p2 = lock_user_string(arg2);
10003             if (!p || !p2)
10004                 ret = -TARGET_EFAULT;
10005             else
10006                 ret = get_errno(symlink(p, p2));
10007             unlock_user(p2, arg2, 0);
10008             unlock_user(p, arg1, 0);
10009         }
10010         return ret;
10011 #endif
10012 #if defined(TARGET_NR_symlinkat)
10013     case TARGET_NR_symlinkat:
10014         {
10015             void *p2;
10016             p  = lock_user_string(arg1);
10017             p2 = lock_user_string(arg3);
10018             if (!p || !p2)
10019                 ret = -TARGET_EFAULT;
10020             else
10021                 ret = get_errno(symlinkat(p, arg2, p2));
10022             unlock_user(p2, arg3, 0);
10023             unlock_user(p, arg1, 0);
10024         }
10025         return ret;
10026 #endif
10027 #ifdef TARGET_NR_readlink
10028     case TARGET_NR_readlink:
10029         {
10030             void *p2;
10031             p = lock_user_string(arg1);
10032             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10033             if (!p || !p2) {
10034                 ret = -TARGET_EFAULT;
10035             } else if (!arg3) {
10036                 /* Short circuit this for the magic exe check. */
10037                 ret = -TARGET_EINVAL;
10038             } else if (is_proc_myself((const char *)p, "exe")) {
10039                 /*
10040                  * Don't worry about sign mismatch as earlier mapping
10041                  * logic would have thrown a bad address error.
10042                  */
10043                 ret = MIN(strlen(exec_path), arg3);
10044                 /* We cannot NUL terminate the string. */
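                /*
                 * readlink(2) does not NUL-terminate either, so copying at
                 * most arg3 bytes of exec_path matches normal syscall
                 * semantics; the guest sees the emulated binary's path
                 * rather than QEMU's own.
                 */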
10045                 memcpy(p2, exec_path, ret);
10046             } else {
10047                 ret = get_errno(readlink(path(p), p2, arg3));
10048             }
10049             unlock_user(p2, arg2, ret);
10050             unlock_user(p, arg1, 0);
10051         }
10052         return ret;
10053 #endif
10054 #if defined(TARGET_NR_readlinkat)
10055     case TARGET_NR_readlinkat:
10056         {
10057             void *p2;
10058             p  = lock_user_string(arg2);
10059             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10060             if (!p || !p2) {
10061                 ret = -TARGET_EFAULT;
10062             } else if (!arg4) {
10063                 /* Short circuit this for the magic exe check. */
10064                 ret = -TARGET_EINVAL;
10065             } else if (is_proc_myself((const char *)p, "exe")) {
10066                 /*
10067                  * Don't worry about sign mismatch as earlier mapping
10068                  * logic would have thrown a bad address error.
10069                  */
10070                 ret = MIN(strlen(exec_path), arg4);
10071                 /* We cannot NUL terminate the string. */
10072                 memcpy(p2, exec_path, ret);
10073             } else {
10074                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10075             }
10076             unlock_user(p2, arg3, ret);
10077             unlock_user(p, arg2, 0);
10078         }
10079         return ret;
10080 #endif
10081 #ifdef TARGET_NR_swapon
10082     case TARGET_NR_swapon:
10083         if (!(p = lock_user_string(arg1)))
10084             return -TARGET_EFAULT;
10085         ret = get_errno(swapon(p, arg2));
10086         unlock_user(p, arg1, 0);
10087         return ret;
10088 #endif
10089     case TARGET_NR_reboot:
10090         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10091            /* arg4 is only used with RESTART2; it must be ignored in all other cases */
10092            p = lock_user_string(arg4);
10093            if (!p) {
10094                return -TARGET_EFAULT;
10095            }
10096            ret = get_errno(reboot(arg1, arg2, arg3, p));
10097            unlock_user(p, arg4, 0);
10098         } else {
10099            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10100         }
10101         return ret;
10102 #ifdef TARGET_NR_mmap
10103     case TARGET_NR_mmap:
10104 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10105     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10106     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10107     || defined(TARGET_S390X)
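        /*
         * These targets use the legacy mmap ABI: the guest passes a single
         * pointer to a block of six arguments (addr, length, prot, flags,
         * fd, offset), which is unpacked below.
         */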
10108         {
10109             abi_ulong *v;
10110             abi_ulong v1, v2, v3, v4, v5, v6;
10111             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10112                 return -TARGET_EFAULT;
10113             v1 = tswapal(v[0]);
10114             v2 = tswapal(v[1]);
10115             v3 = tswapal(v[2]);
10116             v4 = tswapal(v[3]);
10117             v5 = tswapal(v[4]);
10118             v6 = tswapal(v[5]);
10119             unlock_user(v, arg1, 0);
10120             ret = get_errno(target_mmap(v1, v2, v3,
10121                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10122                                         v5, v6));
10123         }
10124 #else
10125         /* mmap pointers are always untagged */
10126         ret = get_errno(target_mmap(arg1, arg2, arg3,
10127                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10128                                     arg5,
10129                                     arg6));
10130 #endif
10131         return ret;
10132 #endif
10133 #ifdef TARGET_NR_mmap2
10134     case TARGET_NR_mmap2:
10135 #ifndef MMAP_SHIFT
10136 #define MMAP_SHIFT 12
10137 #endif
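        /* mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes
         * (4096 by default), so scale it up to a byte offset here. */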
10138         ret = target_mmap(arg1, arg2, arg3,
10139                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10140                           arg5, arg6 << MMAP_SHIFT);
10141         return get_errno(ret);
10142 #endif
10143     case TARGET_NR_munmap:
10144         arg1 = cpu_untagged_addr(cpu, arg1);
10145         return get_errno(target_munmap(arg1, arg2));
10146     case TARGET_NR_mprotect:
10147         arg1 = cpu_untagged_addr(cpu, arg1);
10148         {
10149             TaskState *ts = cpu->opaque;
10150             /* Special hack to detect libc making the stack executable.  */
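            /*
             * When libc mprotects part of the stack with PROT_GROWSDOWN, the
             * request is widened to start at the guest stack limit and cover
             * everything up to the end of the original range, approximating
             * the kernel's grows-down handling; PROT_GROWSDOWN itself is then
             * dropped since the host range is now explicit.
             */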
10151             if ((arg3 & PROT_GROWSDOWN)
10152                 && arg1 >= ts->info->stack_limit
10153                 && arg1 <= ts->info->start_stack) {
10154                 arg3 &= ~PROT_GROWSDOWN;
10155                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10156                 arg1 = ts->info->stack_limit;
10157             }
10158         }
10159         return get_errno(target_mprotect(arg1, arg2, arg3));
10160 #ifdef TARGET_NR_mremap
10161     case TARGET_NR_mremap:
10162         arg1 = cpu_untagged_addr(cpu, arg1);
10163         /* mremap new_addr (arg5) is always untagged */
10164         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10165 #endif
10166         /* ??? msync/mlock/munlock are broken for softmmu.  */
10167 #ifdef TARGET_NR_msync
10168     case TARGET_NR_msync:
10169         return get_errno(msync(g2h(cpu, arg1), arg2,
10170                                target_to_host_msync_arg(arg3)));
10171 #endif
10172 #ifdef TARGET_NR_mlock
10173     case TARGET_NR_mlock:
10174         return get_errno(mlock(g2h(cpu, arg1), arg2));
10175 #endif
10176 #ifdef TARGET_NR_munlock
10177     case TARGET_NR_munlock:
10178         return get_errno(munlock(g2h(cpu, arg1), arg2));
10179 #endif
10180 #ifdef TARGET_NR_mlockall
10181     case TARGET_NR_mlockall:
10182         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10183 #endif
10184 #ifdef TARGET_NR_munlockall
10185     case TARGET_NR_munlockall:
10186         return get_errno(munlockall());
10187 #endif
10188 #ifdef TARGET_NR_truncate
10189     case TARGET_NR_truncate:
10190         if (!(p = lock_user_string(arg1)))
10191             return -TARGET_EFAULT;
10192         ret = get_errno(truncate(p, arg2));
10193         unlock_user(p, arg1, 0);
10194         return ret;
10195 #endif
10196 #ifdef TARGET_NR_ftruncate
10197     case TARGET_NR_ftruncate:
10198         return get_errno(ftruncate(arg1, arg2));
10199 #endif
10200     case TARGET_NR_fchmod:
10201         return get_errno(fchmod(arg1, arg2));
10202 #if defined(TARGET_NR_fchmodat)
10203     case TARGET_NR_fchmodat:
10204         if (!(p = lock_user_string(arg2)))
10205             return -TARGET_EFAULT;
10206         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10207         unlock_user(p, arg2, 0);
10208         return ret;
10209 #endif
10210     case TARGET_NR_getpriority:
10211         /* Note that negative values are valid for getpriority, so we must
10212            differentiate based on errno settings.  */
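        /*
         * The host libc getpriority() returns the real nice value (-20..19),
         * whereas the raw kernel syscall returns a non-negative biased value;
         * the per-target adjustments below reproduce the guest kernel ABI.
         */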
10213         errno = 0;
10214         ret = getpriority(arg1, arg2);
10215         if (ret == -1 && errno != 0) {
10216             return -host_to_target_errno(errno);
10217         }
10218 #ifdef TARGET_ALPHA
10219         /* Return value is the unbiased priority.  Signal no error.  */
10220         cpu_env->ir[IR_V0] = 0;
10221 #else
10222         /* Return value is a biased priority to avoid negative numbers.  */
10223         ret = 20 - ret;
10224 #endif
10225         return ret;
10226     case TARGET_NR_setpriority:
10227         return get_errno(setpriority(arg1, arg2, arg3));
10228 #ifdef TARGET_NR_statfs
10229     case TARGET_NR_statfs:
10230         if (!(p = lock_user_string(arg1))) {
10231             return -TARGET_EFAULT;
10232         }
10233         ret = get_errno(statfs(path(p), &stfs));
10234         unlock_user(p, arg1, 0);
10235     convert_statfs:
10236         if (!is_error(ret)) {
10237             struct target_statfs *target_stfs;
10238 
10239             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10240                 return -TARGET_EFAULT;
10241             __put_user(stfs.f_type, &target_stfs->f_type);
10242             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10243             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10244             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10245             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10246             __put_user(stfs.f_files, &target_stfs->f_files);
10247             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10248             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10249             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10250             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10251             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10252 #ifdef _STATFS_F_FLAGS
10253             __put_user(stfs.f_flags, &target_stfs->f_flags);
10254 #else
10255             __put_user(0, &target_stfs->f_flags);
10256 #endif
10257             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10258             unlock_user_struct(target_stfs, arg2, 1);
10259         }
10260         return ret;
10261 #endif
10262 #ifdef TARGET_NR_fstatfs
10263     case TARGET_NR_fstatfs:
10264         ret = get_errno(fstatfs(arg1, &stfs));
10265         goto convert_statfs;
10266 #endif
10267 #ifdef TARGET_NR_statfs64
10268     case TARGET_NR_statfs64:
10269         if (!(p = lock_user_string(arg1))) {
10270             return -TARGET_EFAULT;
10271         }
10272         ret = get_errno(statfs(path(p), &stfs));
10273         unlock_user(p, arg1, 0);
10274     convert_statfs64:
10275         if (!is_error(ret)) {
10276             struct target_statfs64 *target_stfs;
10277 
10278             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10279                 return -TARGET_EFAULT;
10280             __put_user(stfs.f_type, &target_stfs->f_type);
10281             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10282             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10283             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10284             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10285             __put_user(stfs.f_files, &target_stfs->f_files);
10286             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10287             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10288             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10289             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10290             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10291 #ifdef _STATFS_F_FLAGS
10292             __put_user(stfs.f_flags, &target_stfs->f_flags);
10293 #else
10294             __put_user(0, &target_stfs->f_flags);
10295 #endif
10296             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10297             unlock_user_struct(target_stfs, arg3, 1);
10298         }
10299         return ret;
10300     case TARGET_NR_fstatfs64:
10301         ret = get_errno(fstatfs(arg1, &stfs));
10302         goto convert_statfs64;
10303 #endif
10304 #ifdef TARGET_NR_socketcall
10305     case TARGET_NR_socketcall:
10306         return do_socketcall(arg1, arg2);
10307 #endif
10308 #ifdef TARGET_NR_accept
10309     case TARGET_NR_accept:
10310         return do_accept4(arg1, arg2, arg3, 0);
10311 #endif
10312 #ifdef TARGET_NR_accept4
10313     case TARGET_NR_accept4:
10314         return do_accept4(arg1, arg2, arg3, arg4);
10315 #endif
10316 #ifdef TARGET_NR_bind
10317     case TARGET_NR_bind:
10318         return do_bind(arg1, arg2, arg3);
10319 #endif
10320 #ifdef TARGET_NR_connect
10321     case TARGET_NR_connect:
10322         return do_connect(arg1, arg2, arg3);
10323 #endif
10324 #ifdef TARGET_NR_getpeername
10325     case TARGET_NR_getpeername:
10326         return do_getpeername(arg1, arg2, arg3);
10327 #endif
10328 #ifdef TARGET_NR_getsockname
10329     case TARGET_NR_getsockname:
10330         return do_getsockname(arg1, arg2, arg3);
10331 #endif
10332 #ifdef TARGET_NR_getsockopt
10333     case TARGET_NR_getsockopt:
10334         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10335 #endif
10336 #ifdef TARGET_NR_listen
10337     case TARGET_NR_listen:
10338         return get_errno(listen(arg1, arg2));
10339 #endif
10340 #ifdef TARGET_NR_recv
10341     case TARGET_NR_recv:
10342         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10343 #endif
10344 #ifdef TARGET_NR_recvfrom
10345     case TARGET_NR_recvfrom:
10346         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10347 #endif
10348 #ifdef TARGET_NR_recvmsg
10349     case TARGET_NR_recvmsg:
10350         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10351 #endif
10352 #ifdef TARGET_NR_send
10353     case TARGET_NR_send:
10354         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10355 #endif
10356 #ifdef TARGET_NR_sendmsg
10357     case TARGET_NR_sendmsg:
10358         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10359 #endif
10360 #ifdef TARGET_NR_sendmmsg
10361     case TARGET_NR_sendmmsg:
10362         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10363 #endif
10364 #ifdef TARGET_NR_recvmmsg
10365     case TARGET_NR_recvmmsg:
10366         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10367 #endif
10368 #ifdef TARGET_NR_sendto
10369     case TARGET_NR_sendto:
10370         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10371 #endif
10372 #ifdef TARGET_NR_shutdown
10373     case TARGET_NR_shutdown:
10374         return get_errno(shutdown(arg1, arg2));
10375 #endif
10376 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10377     case TARGET_NR_getrandom:
10378         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10379         if (!p) {
10380             return -TARGET_EFAULT;
10381         }
10382         ret = get_errno(getrandom(p, arg2, arg3));
10383         unlock_user(p, arg1, ret);
10384         return ret;
10385 #endif
10386 #ifdef TARGET_NR_socket
10387     case TARGET_NR_socket:
10388         return do_socket(arg1, arg2, arg3);
10389 #endif
10390 #ifdef TARGET_NR_socketpair
10391     case TARGET_NR_socketpair:
10392         return do_socketpair(arg1, arg2, arg3, arg4);
10393 #endif
10394 #ifdef TARGET_NR_setsockopt
10395     case TARGET_NR_setsockopt:
10396         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10397 #endif
10398 #if defined(TARGET_NR_syslog)
10399     case TARGET_NR_syslog:
10400         {
10401             int len = arg3; /* arg2 is the guest buffer, arg3 its length */
10402 
10403             switch (arg1) {
10404             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10405             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10406             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10407             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10408             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10409             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10410             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10411             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10412                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10413             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10414             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10415             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10416                 {
10417                     if (len < 0) {
10418                         return -TARGET_EINVAL;
10419                     }
10420                     if (len == 0) {
10421                         return 0;
10422                     }
10423                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10424                     if (!p) {
10425                         return -TARGET_EFAULT;
10426                     }
10427                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10428                     unlock_user(p, arg2, arg3);
10429                 }
10430                 return ret;
10431             default:
10432                 return -TARGET_EINVAL;
10433             }
10434         }
10435         break;
10436 #endif
10437     case TARGET_NR_setitimer:
10438         {
10439             struct itimerval value, ovalue, *pvalue;
10440 
10441             if (arg2) {
10442                 pvalue = &value;
10443                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10444                     || copy_from_user_timeval(&pvalue->it_value,
10445                                               arg2 + sizeof(struct target_timeval)))
10446                     return -TARGET_EFAULT;
10447             } else {
10448                 pvalue = NULL;
10449             }
10450             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10451             if (!is_error(ret) && arg3) {
10452                 if (copy_to_user_timeval(arg3,
10453                                          &ovalue.it_interval)
10454                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10455                                             &ovalue.it_value))
10456                     return -TARGET_EFAULT;
10457             }
10458         }
10459         return ret;
10460     case TARGET_NR_getitimer:
10461         {
10462             struct itimerval value;
10463 
10464             ret = get_errno(getitimer(arg1, &value));
10465             if (!is_error(ret) && arg2) {
10466                 if (copy_to_user_timeval(arg2,
10467                                          &value.it_interval)
10468                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10469                                             &value.it_value))
10470                     return -TARGET_EFAULT;
10471             }
10472         }
10473         return ret;
10474 #ifdef TARGET_NR_stat
10475     case TARGET_NR_stat:
10476         if (!(p = lock_user_string(arg1))) {
10477             return -TARGET_EFAULT;
10478         }
10479         ret = get_errno(stat(path(p), &st));
10480         unlock_user(p, arg1, 0);
10481         goto do_stat;
10482 #endif
10483 #ifdef TARGET_NR_lstat
10484     case TARGET_NR_lstat:
10485         if (!(p = lock_user_string(arg1))) {
10486             return -TARGET_EFAULT;
10487         }
10488         ret = get_errno(lstat(path(p), &st));
10489         unlock_user(p, arg1, 0);
10490         goto do_stat;
10491 #endif
10492 #ifdef TARGET_NR_fstat
10493     case TARGET_NR_fstat:
10494         {
10495             ret = get_errno(fstat(arg1, &st));
10496 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10497         do_stat:
10498 #endif
10499             if (!is_error(ret)) {
10500                 struct target_stat *target_st;
10501 
10502                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10503                     return -TARGET_EFAULT;
10504                 memset(target_st, 0, sizeof(*target_st));
10505                 __put_user(st.st_dev, &target_st->st_dev);
10506                 __put_user(st.st_ino, &target_st->st_ino);
10507                 __put_user(st.st_mode, &target_st->st_mode);
10508                 __put_user(st.st_uid, &target_st->st_uid);
10509                 __put_user(st.st_gid, &target_st->st_gid);
10510                 __put_user(st.st_nlink, &target_st->st_nlink);
10511                 __put_user(st.st_rdev, &target_st->st_rdev);
10512                 __put_user(st.st_size, &target_st->st_size);
10513                 __put_user(st.st_blksize, &target_st->st_blksize);
10514                 __put_user(st.st_blocks, &target_st->st_blocks);
10515                 __put_user(st.st_atime, &target_st->target_st_atime);
10516                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10517                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10518 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10519                 __put_user(st.st_atim.tv_nsec,
10520                            &target_st->target_st_atime_nsec);
10521                 __put_user(st.st_mtim.tv_nsec,
10522                            &target_st->target_st_mtime_nsec);
10523                 __put_user(st.st_ctim.tv_nsec,
10524                            &target_st->target_st_ctime_nsec);
10525 #endif
10526                 unlock_user_struct(target_st, arg2, 1);
10527             }
10528         }
10529         return ret;
10530 #endif
10531     case TARGET_NR_vhangup:
10532         return get_errno(vhangup());
10533 #ifdef TARGET_NR_syscall
10534     case TARGET_NR_syscall:
10535         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10536                           arg6, arg7, arg8, 0);
10537 #endif
10538 #if defined(TARGET_NR_wait4)
10539     case TARGET_NR_wait4:
10540         {
10541             int status;
10542             abi_long status_ptr = arg2;
10543             struct rusage rusage, *rusage_ptr;
10544             abi_ulong target_rusage = arg4;
10545             abi_long rusage_err;
10546             if (target_rusage)
10547                 rusage_ptr = &rusage;
10548             else
10549                 rusage_ptr = NULL;
10550             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10551             if (!is_error(ret)) {
10552                 if (status_ptr && ret) {
10553                     status = host_to_target_waitstatus(status);
10554                     if (put_user_s32(status, status_ptr))
10555                         return -TARGET_EFAULT;
10556                 }
10557                 if (target_rusage) {
10558                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10559                     if (rusage_err) {
10560                         ret = rusage_err;
10561                     }
10562                 }
10563             }
10564         }
10565         return ret;
10566 #endif
10567 #ifdef TARGET_NR_swapoff
10568     case TARGET_NR_swapoff:
10569         if (!(p = lock_user_string(arg1)))
10570             return -TARGET_EFAULT;
10571         ret = get_errno(swapoff(p));
10572         unlock_user(p, arg1, 0);
10573         return ret;
10574 #endif
10575     case TARGET_NR_sysinfo:
10576         {
10577             struct target_sysinfo *target_value;
10578             struct sysinfo value;
10579             ret = get_errno(sysinfo(&value));
10580             if (!is_error(ret) && arg1)
10581             {
10582                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10583                     return -TARGET_EFAULT;
10584                 __put_user(value.uptime, &target_value->uptime);
10585                 __put_user(value.loads[0], &target_value->loads[0]);
10586                 __put_user(value.loads[1], &target_value->loads[1]);
10587                 __put_user(value.loads[2], &target_value->loads[2]);
10588                 __put_user(value.totalram, &target_value->totalram);
10589                 __put_user(value.freeram, &target_value->freeram);
10590                 __put_user(value.sharedram, &target_value->sharedram);
10591                 __put_user(value.bufferram, &target_value->bufferram);
10592                 __put_user(value.totalswap, &target_value->totalswap);
10593                 __put_user(value.freeswap, &target_value->freeswap);
10594                 __put_user(value.procs, &target_value->procs);
10595                 __put_user(value.totalhigh, &target_value->totalhigh);
10596                 __put_user(value.freehigh, &target_value->freehigh);
10597                 __put_user(value.mem_unit, &target_value->mem_unit);
10598                 unlock_user_struct(target_value, arg1, 1);
10599             }
10600         }
10601         return ret;
10602 #ifdef TARGET_NR_ipc
10603     case TARGET_NR_ipc:
10604         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10605 #endif
10606 #ifdef TARGET_NR_semget
10607     case TARGET_NR_semget:
10608         return get_errno(semget(arg1, arg2, arg3));
10609 #endif
10610 #ifdef TARGET_NR_semop
10611     case TARGET_NR_semop:
10612         return do_semtimedop(arg1, arg2, arg3, 0, false);
10613 #endif
10614 #ifdef TARGET_NR_semtimedop
10615     case TARGET_NR_semtimedop:
10616         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10617 #endif
10618 #ifdef TARGET_NR_semtimedop_time64
10619     case TARGET_NR_semtimedop_time64:
10620         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10621 #endif
10622 #ifdef TARGET_NR_semctl
10623     case TARGET_NR_semctl:
10624         return do_semctl(arg1, arg2, arg3, arg4);
10625 #endif
10626 #ifdef TARGET_NR_msgctl
10627     case TARGET_NR_msgctl:
10628         return do_msgctl(arg1, arg2, arg3);
10629 #endif
10630 #ifdef TARGET_NR_msgget
10631     case TARGET_NR_msgget:
10632         return get_errno(msgget(arg1, arg2));
10633 #endif
10634 #ifdef TARGET_NR_msgrcv
10635     case TARGET_NR_msgrcv:
10636         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10637 #endif
10638 #ifdef TARGET_NR_msgsnd
10639     case TARGET_NR_msgsnd:
10640         return do_msgsnd(arg1, arg2, arg3, arg4);
10641 #endif
10642 #ifdef TARGET_NR_shmget
10643     case TARGET_NR_shmget:
10644         return get_errno(shmget(arg1, arg2, arg3));
10645 #endif
10646 #ifdef TARGET_NR_shmctl
10647     case TARGET_NR_shmctl:
10648         return do_shmctl(arg1, arg2, arg3);
10649 #endif
10650 #ifdef TARGET_NR_shmat
10651     case TARGET_NR_shmat:
10652         return do_shmat(cpu_env, arg1, arg2, arg3);
10653 #endif
10654 #ifdef TARGET_NR_shmdt
10655     case TARGET_NR_shmdt:
10656         return do_shmdt(arg1);
10657 #endif
10658     case TARGET_NR_fsync:
10659         return get_errno(fsync(arg1));
10660     case TARGET_NR_clone:
10661         /* Linux manages to have three different orderings for its
10662          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10663          * match the kernel's CONFIG_CLONE_* settings.
10664          * Microblaze is further special in that it uses a sixth
10665          * implicit argument to clone for the TLS pointer.
10666          */
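        /*
         * Sketch of the guest argument layouts handled below, assuming
         * do_fork() takes (env, flags, newsp, parent_tidptr, tls,
         * child_tidptr):
         *   default:    clone(flags, newsp, parent_tidptr, child_tidptr, tls)
         *   BACKWARDS:  clone(flags, newsp, parent_tidptr, tls, child_tidptr)
         *   BACKWARDS2: clone(newsp, flags, parent_tidptr, child_tidptr, tls)
         */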
10667 #if defined(TARGET_MICROBLAZE)
10668         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10669 #elif defined(TARGET_CLONE_BACKWARDS)
10670         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10671 #elif defined(TARGET_CLONE_BACKWARDS2)
10672         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10673 #else
10674         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10675 #endif
10676         return ret;
10677 #ifdef __NR_exit_group
10678         /* new thread calls */
10679     case TARGET_NR_exit_group:
10680         preexit_cleanup(cpu_env, arg1);
10681         return get_errno(exit_group(arg1));
10682 #endif
10683     case TARGET_NR_setdomainname:
10684         if (!(p = lock_user_string(arg1)))
10685             return -TARGET_EFAULT;
10686         ret = get_errno(setdomainname(p, arg2));
10687         unlock_user(p, arg1, 0);
10688         return ret;
10689     case TARGET_NR_uname:
10690         /* no need to transcode because we use the linux syscall */
10691         {
10692             struct new_utsname * buf;
10693 
10694             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10695                 return -TARGET_EFAULT;
10696             ret = get_errno(sys_uname(buf));
10697             if (!is_error(ret)) {
10698                 /* Overwrite the native machine name with whatever is being
10699                    emulated. */
10700                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10701                           sizeof(buf->machine));
10702                 /* Allow the user to override the reported release.  */
10703                 if (qemu_uname_release && *qemu_uname_release) {
10704                     g_strlcpy(buf->release, qemu_uname_release,
10705                               sizeof(buf->release));
10706                 }
10707             }
10708             unlock_user_struct(buf, arg1, 1);
10709         }
10710         return ret;
10711 #ifdef TARGET_I386
10712     case TARGET_NR_modify_ldt:
10713         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10714 #if !defined(TARGET_X86_64)
10715     case TARGET_NR_vm86:
10716         return do_vm86(cpu_env, arg1, arg2);
10717 #endif
10718 #endif
10719 #if defined(TARGET_NR_adjtimex)
10720     case TARGET_NR_adjtimex:
10721         {
10722             struct timex host_buf;
10723 
10724             if (target_to_host_timex(&host_buf, arg1) != 0) {
10725                 return -TARGET_EFAULT;
10726             }
10727             ret = get_errno(adjtimex(&host_buf));
10728             if (!is_error(ret)) {
10729                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10730                     return -TARGET_EFAULT;
10731                 }
10732             }
10733         }
10734         return ret;
10735 #endif
10736 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10737     case TARGET_NR_clock_adjtime:
10738         {
10739             struct timex htx;
10740 
10741             if (target_to_host_timex(&htx, arg2) != 0) {
10742                 return -TARGET_EFAULT;
10743             }
10744             ret = get_errno(clock_adjtime(arg1, &htx));
10745             if (!is_error(ret)) {
10746                 if (host_to_target_timex(arg2, &htx) != 0) {
10747                     return -TARGET_EFAULT;
10748                 }
10749             }
10750         }
10751         return ret;
10752 #endif
10753 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10754     case TARGET_NR_clock_adjtime64:
10755         {
10756             struct timex htx;
10757 
10758             if (target_to_host_timex64(&htx, arg2) != 0) {
10759                 return -TARGET_EFAULT;
10760             }
10761             ret = get_errno(clock_adjtime(arg1, &htx));
10762             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10763                 return -TARGET_EFAULT;
10764             }
10765         }
10766         return ret;
10767 #endif
10768     case TARGET_NR_getpgid:
10769         return get_errno(getpgid(arg1));
10770     case TARGET_NR_fchdir:
10771         return get_errno(fchdir(arg1));
10772     case TARGET_NR_personality:
10773         return get_errno(personality(arg1));
10774 #ifdef TARGET_NR__llseek /* Not on alpha */
10775     case TARGET_NR__llseek:
10776         {
10777             int64_t res;
10778 #if !defined(__NR_llseek)
10779             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10780             if (res == -1) {
10781                 ret = get_errno(res);
10782             } else {
10783                 ret = 0;
10784             }
10785 #else
10786             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10787 #endif
10788             if ((ret == 0) && put_user_s64(res, arg4)) {
10789                 return -TARGET_EFAULT;
10790             }
10791         }
10792         return ret;
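              /*
               * Note on the path above: when the host has no llseek syscall
               * (the usual case on 64-bit hosts), the two 32-bit guest halves
               * are combined into a single 64-bit offset and plain lseek() is
               * used instead; on success both paths write the resulting
               * offset back through the guest's result pointer in arg4.
               */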
10793 #endif
10794 #ifdef TARGET_NR_getdents
10795     case TARGET_NR_getdents:
10796         return do_getdents(arg1, arg2, arg3);
10797 #endif /* TARGET_NR_getdents */
10798 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10799     case TARGET_NR_getdents64:
10800         return do_getdents64(arg1, arg2, arg3);
10801 #endif /* TARGET_NR_getdents64 */
10802 #if defined(TARGET_NR__newselect)
10803     case TARGET_NR__newselect:
10804         return do_select(arg1, arg2, arg3, arg4, arg5);
10805 #endif
10806 #ifdef TARGET_NR_poll
10807     case TARGET_NR_poll:
10808         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10809 #endif
10810 #ifdef TARGET_NR_ppoll
10811     case TARGET_NR_ppoll:
10812         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10813 #endif
10814 #ifdef TARGET_NR_ppoll_time64
10815     case TARGET_NR_ppoll_time64:
10816         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10817 #endif
10818     case TARGET_NR_flock:
10819         /* NOTE: the flock constant seems to be the same for every
10820            Linux platform */
10821         return get_errno(safe_flock(arg1, arg2));
10822     case TARGET_NR_readv:
10823         {
10824             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10825             if (vec != NULL) {
10826                 ret = get_errno(safe_readv(arg1, vec, arg3));
10827                 unlock_iovec(vec, arg2, arg3, 1);
10828             } else {
10829                 ret = -host_to_target_errno(errno);
10830             }
10831         }
10832         return ret;
10833     case TARGET_NR_writev:
10834         {
10835             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10836             if (vec != NULL) {
10837                 ret = get_errno(safe_writev(arg1, vec, arg3));
10838                 unlock_iovec(vec, arg2, arg3, 0);
10839             } else {
10840                 ret = -host_to_target_errno(errno);
10841             }
10842         }
10843         return ret;
10844 #if defined(TARGET_NR_preadv)
10845     case TARGET_NR_preadv:
10846         {
10847             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10848             if (vec != NULL) {
10849                 unsigned long low, high;
10850 
10851                 target_to_host_low_high(arg4, arg5, &low, &high);
10852                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10853                 unlock_iovec(vec, arg2, arg3, 1);
10854             } else {
10855                 ret = -host_to_target_errno(errno);
10856             }
10857         }
10858         return ret;
10859 #endif
10860 #if defined(TARGET_NR_pwritev)
10861     case TARGET_NR_pwritev:
10862         {
10863             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10864             if (vec != NULL) {
10865                 unsigned long low, high;
10866 
10867                 target_to_host_low_high(arg4, arg5, &low, &high);
10868                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10869                 unlock_iovec(vec, arg2, arg3, 0);
10870             } else {
10871                 ret = -host_to_target_errno(errno);
10872             }
10873         }
10874         return ret;
10875 #endif
10876     case TARGET_NR_getsid:
10877         return get_errno(getsid(arg1));
10878 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10879     case TARGET_NR_fdatasync:
10880         return get_errno(fdatasync(arg1));
10881 #endif
10882     case TARGET_NR_sched_getaffinity:
10883         {
10884             unsigned int mask_size;
10885             unsigned long *mask;
10886 
10887             /*
10888              * sched_getaffinity needs multiples of ulong, so need to take
10889              * care of mismatches between target ulong and host ulong sizes.
10890              */
10891             if (arg2 & (sizeof(abi_ulong) - 1)) {
10892                 return -TARGET_EINVAL;
10893             }
10894             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10895 
10896             mask = alloca(mask_size);
10897             memset(mask, 0, mask_size);
10898             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10899 
10900             if (!is_error(ret)) {
10901                 if (ret > arg2) {
10902                     /* More data returned than the caller's buffer will fit.
10903                      * This only happens if sizeof(abi_long) < sizeof(long)
10904                      * and the caller passed us a buffer holding an odd number
10905                      * of abi_longs. If the host kernel is actually using the
10906                      * extra 4 bytes then fail EINVAL; otherwise we can just
10907                      * ignore them and only copy the interesting part.
10908                      */
10909                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10910                     if (numcpus > arg2 * 8) {
10911                         return -TARGET_EINVAL;
10912                     }
10913                     ret = arg2;
10914                 }
10915 
10916                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10917                     return -TARGET_EFAULT;
10918                 }
10919             }
10920         }
10921         return ret;
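          /*
           * Worked example (illustrative): for a 32-bit guest on a 64-bit
           * host, sizeof(abi_ulong) == 4 and sizeof(*mask) == 8, so a guest
           * buffer of arg2 == 4 bytes passes the alignment check and
           * mask_size is rounded up to 8 so the host syscall operates on a
           * whole host ulong; the ret > arg2 path above then trims the reply
           * back down to what the guest buffer can actually hold.
           */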
10922     case TARGET_NR_sched_setaffinity:
10923         {
10924             unsigned int mask_size;
10925             unsigned long *mask;
10926 
10927             /*
10928              * sched_setaffinity needs multiples of ulong, so need to take
10929              * care of mismatches between target ulong and host ulong sizes.
10930              */
10931             if (arg2 & (sizeof(abi_ulong) - 1)) {
10932                 return -TARGET_EINVAL;
10933             }
10934             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10935             mask = alloca(mask_size);
10936 
10937             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10938             if (ret) {
10939                 return ret;
10940             }
10941 
10942             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10943         }
10944     case TARGET_NR_getcpu:
10945         {
10946             unsigned cpu, node;
10947             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10948                                        arg2 ? &node : NULL,
10949                                        NULL));
10950             if (is_error(ret)) {
10951                 return ret;
10952             }
10953             if (arg1 && put_user_u32(cpu, arg1)) {
10954                 return -TARGET_EFAULT;
10955             }
10956             if (arg2 && put_user_u32(node, arg2)) {
10957                 return -TARGET_EFAULT;
10958             }
10959         }
10960         return ret;
10961     case TARGET_NR_sched_setparam:
10962         {
10963             struct target_sched_param *target_schp;
10964             struct sched_param schp;
10965 
10966             if (arg2 == 0) {
10967                 return -TARGET_EINVAL;
10968             }
10969             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10970                 return -TARGET_EFAULT;
10971             }
10972             schp.sched_priority = tswap32(target_schp->sched_priority);
10973             unlock_user_struct(target_schp, arg2, 0);
10974             return get_errno(sys_sched_setparam(arg1, &schp));
10975         }
10976     case TARGET_NR_sched_getparam:
10977         {
10978             struct target_sched_param *target_schp;
10979             struct sched_param schp;
10980 
10981             if (arg2 == 0) {
10982                 return -TARGET_EINVAL;
10983             }
10984             ret = get_errno(sys_sched_getparam(arg1, &schp));
10985             if (!is_error(ret)) {
10986                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10987                     return -TARGET_EFAULT;
10988                 }
10989                 target_schp->sched_priority = tswap32(schp.sched_priority);
10990                 unlock_user_struct(target_schp, arg2, 1);
10991             }
10992         }
10993         return ret;
10994     case TARGET_NR_sched_setscheduler:
10995         {
10996             struct target_sched_param *target_schp;
10997             struct sched_param schp;
10998             if (arg3 == 0) {
10999                 return -TARGET_EINVAL;
11000             }
11001             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11002                 return -TARGET_EFAULT;
11003             }
11004             schp.sched_priority = tswap32(target_schp->sched_priority);
11005             unlock_user_struct(target_schp, arg3, 0);
11006             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11007         }
11008     case TARGET_NR_sched_getscheduler:
11009         return get_errno(sys_sched_getscheduler(arg1));
11010     case TARGET_NR_sched_getattr:
11011         {
11012             struct target_sched_attr *target_scha;
11013             struct sched_attr scha;
11014             if (arg2 == 0) {
11015                 return -TARGET_EINVAL;
11016             }
11017             if (arg3 > sizeof(scha)) {
11018                 arg3 = sizeof(scha);
11019             }
11020             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11021             if (!is_error(ret)) {
11022                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11023                 if (!target_scha) {
11024                     return -TARGET_EFAULT;
11025                 }
11026                 target_scha->size = tswap32(scha.size);
11027                 target_scha->sched_policy = tswap32(scha.sched_policy);
11028                 target_scha->sched_flags = tswap64(scha.sched_flags);
11029                 target_scha->sched_nice = tswap32(scha.sched_nice);
11030                 target_scha->sched_priority = tswap32(scha.sched_priority);
11031                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11032                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11033                 target_scha->sched_period = tswap64(scha.sched_period);
11034                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11035                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11036                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11037                 }
11038                 unlock_user(target_scha, arg2, arg3);
11039             }
11040             return ret;
11041         }
11042     case TARGET_NR_sched_setattr:
11043         {
11044             struct target_sched_attr *target_scha;
11045             struct sched_attr scha;
11046             uint32_t size;
11047             int zeroed;
11048             if (arg2 == 0) {
11049                 return -TARGET_EINVAL;
11050             }
11051             if (get_user_u32(size, arg2)) {
11052                 return -TARGET_EFAULT;
11053             }
11054             if (!size) {
11055                 size = offsetof(struct target_sched_attr, sched_util_min);
11056             }
11057             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11058                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11059                     return -TARGET_EFAULT;
11060                 }
11061                 return -TARGET_E2BIG;
11062             }
11063 
11064             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11065             if (zeroed < 0) {
11066                 return zeroed;
11067             } else if (zeroed == 0) {
11068                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11069                     return -TARGET_EFAULT;
11070                 }
11071                 return -TARGET_E2BIG;
11072             }
11073             if (size > sizeof(struct target_sched_attr)) {
11074                 size = sizeof(struct target_sched_attr);
11075             }
11076 
11077             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11078             if (!target_scha) {
11079                 return -TARGET_EFAULT;
11080             }
11081             scha.size = size;
11082             scha.sched_policy = tswap32(target_scha->sched_policy);
11083             scha.sched_flags = tswap64(target_scha->sched_flags);
11084             scha.sched_nice = tswap32(target_scha->sched_nice);
11085             scha.sched_priority = tswap32(target_scha->sched_priority);
11086             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11087             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11088             scha.sched_period = tswap64(target_scha->sched_period);
11089             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11090                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11091                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11092             }
11093             unlock_user(target_scha, arg2, 0);
11094             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11095         }
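          /*
           * Note on the size handling above: sched_attr is an extensible
           * struct.  A zero guest size is treated as the original layout
           * (fields up to sched_period); a smaller size is rejected with
           * E2BIG after reporting the size we do support; and any guest bytes
           * beyond the layout we know about must be zero (check_zeroed_user)
           * before the structure is truncated to our layout for the host.
           */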
11096     case TARGET_NR_sched_yield:
11097         return get_errno(sched_yield());
11098     case TARGET_NR_sched_get_priority_max:
11099         return get_errno(sched_get_priority_max(arg1));
11100     case TARGET_NR_sched_get_priority_min:
11101         return get_errno(sched_get_priority_min(arg1));
11102 #ifdef TARGET_NR_sched_rr_get_interval
11103     case TARGET_NR_sched_rr_get_interval:
11104         {
11105             struct timespec ts;
11106             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11107             if (!is_error(ret)) {
11108                 ret = host_to_target_timespec(arg2, &ts);
11109             }
11110         }
11111         return ret;
11112 #endif
11113 #ifdef TARGET_NR_sched_rr_get_interval_time64
11114     case TARGET_NR_sched_rr_get_interval_time64:
11115         {
11116             struct timespec ts;
11117             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11118             if (!is_error(ret)) {
11119                 ret = host_to_target_timespec64(arg2, &ts);
11120             }
11121         }
11122         return ret;
11123 #endif
11124 #if defined(TARGET_NR_nanosleep)
11125     case TARGET_NR_nanosleep:
11126         {
11127             struct timespec req, rem;
11128             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
11129             ret = get_errno(safe_nanosleep(&req, &rem));
11130             if (is_error(ret) && arg2 &&
11131                 host_to_target_timespec(arg2, &rem)) {
                      return -TARGET_EFAULT;
11132             }
11133         }
11134         return ret;
11135 #endif
11136     case TARGET_NR_prctl:
11137         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11139 #ifdef TARGET_NR_arch_prctl
11140     case TARGET_NR_arch_prctl:
11141         return do_arch_prctl(cpu_env, arg1, arg2);
11142 #endif
11143 #ifdef TARGET_NR_pread64
11144     case TARGET_NR_pread64:
11145         if (regpairs_aligned(cpu_env, num)) {
11146             arg4 = arg5;
11147             arg5 = arg6;
11148         }
11149         if (arg2 == 0 && arg3 == 0) {
11150             /* Special-case NULL buffer and zero length, which should succeed */
11151             p = 0;
11152         } else {
11153             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11154             if (!p) {
11155                 return -TARGET_EFAULT;
11156             }
11157         }
11158         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11159         unlock_user(p, arg2, ret);
11160         return ret;
11161     case TARGET_NR_pwrite64:
11162         if (regpairs_aligned(cpu_env, num)) {
11163             arg4 = arg5;
11164             arg5 = arg6;
11165         }
11166         if (arg2 == 0 && arg3 == 0) {
11167             /* Special-case NULL buffer and zero length, which should succeed */
11168             p = 0;
11169         } else {
11170             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11171             if (!p) {
11172                 return -TARGET_EFAULT;
11173             }
11174         }
11175         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11176         unlock_user(p, arg2, 0);
11177         return ret;
11178 #endif
11179     case TARGET_NR_getcwd:
11180         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11181             return -TARGET_EFAULT;
11182         ret = get_errno(sys_getcwd1(p, arg2));
11183         unlock_user(p, arg1, ret);
11184         return ret;
11185     case TARGET_NR_capget:
11186     case TARGET_NR_capset:
11187     {
11188         struct target_user_cap_header *target_header;
11189         struct target_user_cap_data *target_data = NULL;
11190         struct __user_cap_header_struct header;
11191         struct __user_cap_data_struct data[2];
11192         struct __user_cap_data_struct *dataptr = NULL;
11193         int i, target_datalen;
11194         int data_items = 1;
11195 
11196         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11197             return -TARGET_EFAULT;
11198         }
11199         header.version = tswap32(target_header->version);
11200         header.pid = tswap32(target_header->pid);
11201 
11202         if (header.version != _LINUX_CAPABILITY_VERSION) {
11203             /* Versions 2 and up take a pointer to two user_data structs */
11204             data_items = 2;
11205         }
11206 
11207         target_datalen = sizeof(*target_data) * data_items;
11208 
11209         if (arg2) {
11210             if (num == TARGET_NR_capget) {
11211                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11212             } else {
11213                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11214             }
11215             if (!target_data) {
11216                 unlock_user_struct(target_header, arg1, 0);
11217                 return -TARGET_EFAULT;
11218             }
11219 
11220             if (num == TARGET_NR_capset) {
11221                 for (i = 0; i < data_items; i++) {
11222                     data[i].effective = tswap32(target_data[i].effective);
11223                     data[i].permitted = tswap32(target_data[i].permitted);
11224                     data[i].inheritable = tswap32(target_data[i].inheritable);
11225                 }
11226             }
11227 
11228             dataptr = data;
11229         }
11230 
11231         if (num == TARGET_NR_capget) {
11232             ret = get_errno(capget(&header, dataptr));
11233         } else {
11234             ret = get_errno(capset(&header, dataptr));
11235         }
11236 
11237         /* The kernel always updates version for both capget and capset */
11238         target_header->version = tswap32(header.version);
11239         unlock_user_struct(target_header, arg1, 1);
11240 
11241         if (arg2) {
11242             if (num == TARGET_NR_capget) {
11243                 for (i = 0; i < data_items; i++) {
11244                     target_data[i].effective = tswap32(data[i].effective);
11245                     target_data[i].permitted = tswap32(data[i].permitted);
11246                     target_data[i].inheritable = tswap32(data[i].inheritable);
11247                 }
11248                 unlock_user(target_data, arg2, target_datalen);
11249             } else {
11250                 unlock_user(target_data, arg2, 0);
11251             }
11252         }
11253         return ret;
11254     }
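          /*
           * Background note: _LINUX_CAPABILITY_VERSION is the v1 capability
           * ABI, which uses a single __user_cap_data_struct; any other
           * version (v2/v3) takes an array of two, hence data_items above.
           * The kernel rewrites header.version on both capget and capset,
           * which is why it is copied back to the guest unconditionally.
           */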
11255     case TARGET_NR_sigaltstack:
11256         return do_sigaltstack(arg1, arg2, cpu_env);
11257 
11258 #ifdef CONFIG_SENDFILE
11259 #ifdef TARGET_NR_sendfile
11260     case TARGET_NR_sendfile:
11261     {
11262         off_t *offp = NULL;
11263         off_t off;
11264         if (arg3) {
11265             ret = get_user_sal(off, arg3);
11266             if (is_error(ret)) {
11267                 return ret;
11268             }
11269             offp = &off;
11270         }
11271         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11272         if (!is_error(ret) && arg3) {
11273             abi_long ret2 = put_user_sal(off, arg3);
11274             if (is_error(ret2)) {
11275                 ret = ret2;
11276             }
11277         }
11278         return ret;
11279     }
11280 #endif
11281 #ifdef TARGET_NR_sendfile64
11282     case TARGET_NR_sendfile64:
11283     {
11284         off_t *offp = NULL;
11285         off_t off;
11286         if (arg3) {
11287             ret = get_user_s64(off, arg3);
11288             if (is_error(ret)) {
11289                 return ret;
11290             }
11291             offp = &off;
11292         }
11293         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11294         if (!is_error(ret) && arg3) {
11295             abi_long ret2 = put_user_s64(off, arg3);
11296             if (is_error(ret2)) {
11297                 ret = ret2;
11298             }
11299         }
11300         return ret;
11301     }
11302 #endif
11303 #endif
11304 #ifdef TARGET_NR_vfork
11305     case TARGET_NR_vfork:
11306         return get_errno(do_fork(cpu_env,
11307                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11308                          0, 0, 0, 0));
11309 #endif
11310 #ifdef TARGET_NR_ugetrlimit
11311     case TARGET_NR_ugetrlimit:
11312     {
11313         struct rlimit rlim;
11314         int resource = target_to_host_resource(arg1);
11315         ret = get_errno(getrlimit(resource, &rlim));
11316         if (!is_error(ret)) {
11317             struct target_rlimit *target_rlim;
11318             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11319                 return -TARGET_EFAULT;
11320             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11321             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11322             unlock_user_struct(target_rlim, arg2, 1);
11323         }
11324         return ret;
11325     }
11326 #endif
11327 #ifdef TARGET_NR_truncate64
11328     case TARGET_NR_truncate64:
11329         if (!(p = lock_user_string(arg1)))
11330             return -TARGET_EFAULT;
11331         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11332         unlock_user(p, arg1, 0);
11333         return ret;
11334 #endif
11335 #ifdef TARGET_NR_ftruncate64
11336     case TARGET_NR_ftruncate64:
11337         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11338 #endif
11339 #ifdef TARGET_NR_stat64
11340     case TARGET_NR_stat64:
11341         if (!(p = lock_user_string(arg1))) {
11342             return -TARGET_EFAULT;
11343         }
11344         ret = get_errno(stat(path(p), &st));
11345         unlock_user(p, arg1, 0);
11346         if (!is_error(ret))
11347             ret = host_to_target_stat64(cpu_env, arg2, &st);
11348         return ret;
11349 #endif
11350 #ifdef TARGET_NR_lstat64
11351     case TARGET_NR_lstat64:
11352         if (!(p = lock_user_string(arg1))) {
11353             return -TARGET_EFAULT;
11354         }
11355         ret = get_errno(lstat(path(p), &st));
11356         unlock_user(p, arg1, 0);
11357         if (!is_error(ret))
11358             ret = host_to_target_stat64(cpu_env, arg2, &st);
11359         return ret;
11360 #endif
11361 #ifdef TARGET_NR_fstat64
11362     case TARGET_NR_fstat64:
11363         ret = get_errno(fstat(arg1, &st));
11364         if (!is_error(ret))
11365             ret = host_to_target_stat64(cpu_env, arg2, &st);
11366         return ret;
11367 #endif
11368 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11369 #ifdef TARGET_NR_fstatat64
11370     case TARGET_NR_fstatat64:
11371 #endif
11372 #ifdef TARGET_NR_newfstatat
11373     case TARGET_NR_newfstatat:
11374 #endif
11375         if (!(p = lock_user_string(arg2))) {
11376             return -TARGET_EFAULT;
11377         }
11378         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11379         unlock_user(p, arg2, 0);
11380         if (!is_error(ret))
11381             ret = host_to_target_stat64(cpu_env, arg3, &st);
11382         return ret;
11383 #endif
11384 #if defined(TARGET_NR_statx)
11385     case TARGET_NR_statx:
11386         {
11387             struct target_statx *target_stx;
11388             int dirfd = arg1;
11389             int flags = arg3;
11390 
11391             p = lock_user_string(arg2);
11392             if (p == NULL) {
11393                 return -TARGET_EFAULT;
11394             }
11395 #if defined(__NR_statx)
11396             {
11397                 /*
11398                  * It is assumed that struct statx is architecture independent.
11399                  */
11400                 struct target_statx host_stx;
11401                 int mask = arg4;
11402 
11403                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11404                 if (!is_error(ret)) {
11405                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11406                         unlock_user(p, arg2, 0);
11407                         return -TARGET_EFAULT;
11408                     }
11409                 }
11410 
11411                 if (ret != -TARGET_ENOSYS) {
11412                     unlock_user(p, arg2, 0);
11413                     return ret;
11414                 }
11415             }
11416 #endif
11417             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11418             unlock_user(p, arg2, 0);
11419 
11420             if (!is_error(ret)) {
11421                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11422                     return -TARGET_EFAULT;
11423                 }
11424                 memset(target_stx, 0, sizeof(*target_stx));
11425                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11426                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11427                 __put_user(st.st_ino, &target_stx->stx_ino);
11428                 __put_user(st.st_mode, &target_stx->stx_mode);
11429                 __put_user(st.st_uid, &target_stx->stx_uid);
11430                 __put_user(st.st_gid, &target_stx->stx_gid);
11431                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11432                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11433                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11434                 __put_user(st.st_size, &target_stx->stx_size);
11435                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11436                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11437                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11438                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11439                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11440                 unlock_user_struct(target_stx, arg5, 1);
11441             }
11442         }
11443         return ret;
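              /*
               * Note on the fallback above: if the host has no statx syscall
               * (or it reports ENOSYS), the request is served with fstatat()
               * and only the fields that a struct stat can provide are filled
               * in; the remaining target_statx fields stay zero from the
               * memset.
               */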
11444 #endif
11445 #ifdef TARGET_NR_lchown
11446     case TARGET_NR_lchown:
11447         if (!(p = lock_user_string(arg1)))
11448             return -TARGET_EFAULT;
11449         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11450         unlock_user(p, arg1, 0);
11451         return ret;
11452 #endif
11453 #ifdef TARGET_NR_getuid
11454     case TARGET_NR_getuid:
11455         return get_errno(high2lowuid(getuid()));
11456 #endif
11457 #ifdef TARGET_NR_getgid
11458     case TARGET_NR_getgid:
11459         return get_errno(high2lowgid(getgid()));
11460 #endif
11461 #ifdef TARGET_NR_geteuid
11462     case TARGET_NR_geteuid:
11463         return get_errno(high2lowuid(geteuid()));
11464 #endif
11465 #ifdef TARGET_NR_getegid
11466     case TARGET_NR_getegid:
11467         return get_errno(high2lowgid(getegid()));
11468 #endif
11469     case TARGET_NR_setreuid:
11470         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11471     case TARGET_NR_setregid:
11472         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11473     case TARGET_NR_getgroups:
11474         {
11475             int gidsetsize = arg1;
11476             target_id *target_grouplist;
11477             gid_t *grouplist;
11478             int i;
11479 
11480             grouplist = alloca(gidsetsize * sizeof(gid_t));
11481             ret = get_errno(getgroups(gidsetsize, grouplist));
11482             if (gidsetsize == 0)
11483                 return ret;
11484             if (!is_error(ret)) {
11485                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11486                 if (!target_grouplist)
11487                     return -TARGET_EFAULT;
11488                 for (i = 0; i < ret; i++)
11489                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11490                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11491             }
11492         }
11493         return ret;
11494     case TARGET_NR_setgroups:
11495         {
11496             int gidsetsize = arg1;
11497             target_id *target_grouplist;
11498             gid_t *grouplist = NULL;
11499             int i;
11500             if (gidsetsize) {
11501                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11502                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11503                 if (!target_grouplist) {
11504                     return -TARGET_EFAULT;
11505                 }
11506                 for (i = 0; i < gidsetsize; i++) {
11507                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11508                 }
11509                 unlock_user(target_grouplist, arg2, 0);
11510             }
11511             return get_errno(setgroups(gidsetsize, grouplist));
11512         }
11513     case TARGET_NR_fchown:
11514         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11515 #if defined(TARGET_NR_fchownat)
11516     case TARGET_NR_fchownat:
11517         if (!(p = lock_user_string(arg2)))
11518             return -TARGET_EFAULT;
11519         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11520                                  low2highgid(arg4), arg5));
11521         unlock_user(p, arg2, 0);
11522         return ret;
11523 #endif
11524 #ifdef TARGET_NR_setresuid
11525     case TARGET_NR_setresuid:
11526         return get_errno(sys_setresuid(low2highuid(arg1),
11527                                        low2highuid(arg2),
11528                                        low2highuid(arg3)));
11529 #endif
11530 #ifdef TARGET_NR_getresuid
11531     case TARGET_NR_getresuid:
11532         {
11533             uid_t ruid, euid, suid;
11534             ret = get_errno(getresuid(&ruid, &euid, &suid));
11535             if (!is_error(ret)) {
11536                 if (put_user_id(high2lowuid(ruid), arg1)
11537                     || put_user_id(high2lowuid(euid), arg2)
11538                     || put_user_id(high2lowuid(suid), arg3))
11539                     return -TARGET_EFAULT;
11540             }
11541         }
11542         return ret;
11543 #endif
11544 #ifdef TARGET_NR_getresgid
11545     case TARGET_NR_setresgid:
11546         return get_errno(sys_setresgid(low2highgid(arg1),
11547                                        low2highgid(arg2),
11548                                        low2highgid(arg3)));
11549 #endif
11550 #ifdef TARGET_NR_getresgid
11551     case TARGET_NR_getresgid:
11552         {
11553             gid_t rgid, egid, sgid;
11554             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11555             if (!is_error(ret)) {
11556                 if (put_user_id(high2lowgid(rgid), arg1)
11557                     || put_user_id(high2lowgid(egid), arg2)
11558                     || put_user_id(high2lowgid(sgid), arg3))
11559                     return -TARGET_EFAULT;
11560             }
11561         }
11562         return ret;
11563 #endif
11564 #ifdef TARGET_NR_chown
11565     case TARGET_NR_chown:
11566         if (!(p = lock_user_string(arg1)))
11567             return -TARGET_EFAULT;
11568         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11569         unlock_user(p, arg1, 0);
11570         return ret;
11571 #endif
11572     case TARGET_NR_setuid:
11573         return get_errno(sys_setuid(low2highuid(arg1)));
11574     case TARGET_NR_setgid:
11575         return get_errno(sys_setgid(low2highgid(arg1)));
11576     case TARGET_NR_setfsuid:
11577         return get_errno(setfsuid(arg1));
11578     case TARGET_NR_setfsgid:
11579         return get_errno(setfsgid(arg1));
11580 
11581 #ifdef TARGET_NR_lchown32
11582     case TARGET_NR_lchown32:
11583         if (!(p = lock_user_string(arg1)))
11584             return -TARGET_EFAULT;
11585         ret = get_errno(lchown(p, arg2, arg3));
11586         unlock_user(p, arg1, 0);
11587         return ret;
11588 #endif
11589 #ifdef TARGET_NR_getuid32
11590     case TARGET_NR_getuid32:
11591         return get_errno(getuid());
11592 #endif
11593 
11594 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11595    /* Alpha specific */
11596     case TARGET_NR_getxuid:
11597          {
11598             uid_t euid;
11599             euid = geteuid();
11600             cpu_env->ir[IR_A4] = euid;
11601          }
11602         return get_errno(getuid());
11603 #endif
11604 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11605    /* Alpha specific */
11606     case TARGET_NR_getxgid:
11607          {
11608             gid_t egid;
11609             egid = getegid();
11610             cpu_env->ir[IR_A4] = egid;
11611          }
11612         return get_errno(getgid());
11613 #endif
11614 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11615     /* Alpha specific */
11616     case TARGET_NR_osf_getsysinfo:
11617         ret = -TARGET_EOPNOTSUPP;
11618         switch (arg1) {
11619           case TARGET_GSI_IEEE_FP_CONTROL:
11620             {
11621                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11622                 uint64_t swcr = cpu_env->swcr;
11623 
11624                 swcr &= ~SWCR_STATUS_MASK;
11625                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11626 
11627                 if (put_user_u64 (swcr, arg2))
11628                         return -TARGET_EFAULT;
11629                 ret = 0;
11630             }
11631             break;
11632 
11633           /* case GSI_IEEE_STATE_AT_SIGNAL:
11634              -- Not implemented in linux kernel.
11635              case GSI_UACPROC:
11636              -- Retrieves current unaligned access state; not much used.
11637              case GSI_PROC_TYPE:
11638              -- Retrieves implver information; surely not used.
11639              case GSI_GET_HWRPB:
11640              -- Grabs a copy of the HWRPB; surely not used.
11641           */
11642         }
11643         return ret;
11644 #endif
11645 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11646     /* Alpha specific */
11647     case TARGET_NR_osf_setsysinfo:
11648         ret = -TARGET_EOPNOTSUPP;
11649         switch (arg1) {
11650           case TARGET_SSI_IEEE_FP_CONTROL:
11651             {
11652                 uint64_t swcr, fpcr;
11653 
11654                 if (get_user_u64 (swcr, arg2)) {
11655                     return -TARGET_EFAULT;
11656                 }
11657 
11658                 /*
11659                  * The kernel calls swcr_update_status to update the
11660                  * status bits from the fpcr at every point that it
11661                  * could be queried.  Therefore, we store the status
11662                  * bits only in FPCR.
11663                  */
11664                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11665 
11666                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11667                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11668                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11669                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11670                 ret = 0;
11671             }
11672             break;
11673 
11674           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11675             {
11676                 uint64_t exc, fpcr, fex;
11677 
11678                 if (get_user_u64(exc, arg2)) {
11679                     return -TARGET_EFAULT;
11680                 }
11681                 exc &= SWCR_STATUS_MASK;
11682                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11683 
11684                 /* Old exceptions are not signaled.  */
11685                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11686                 fex = exc & ~fex;
11687                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11688                 fex &= (cpu_env)->swcr;
11689 
11690                 /* Update the hardware fpcr.  */
11691                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11692                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11693 
11694                 if (fex) {
11695                     int si_code = TARGET_FPE_FLTUNK;
11696                     target_siginfo_t info;
11697 
11698                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11699                         si_code = TARGET_FPE_FLTUND;
11700                     }
11701                     if (fex & SWCR_TRAP_ENABLE_INE) {
11702                         si_code = TARGET_FPE_FLTRES;
11703                     }
11704                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11705                         si_code = TARGET_FPE_FLTUND;
11706                     }
11707                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11708                         si_code = TARGET_FPE_FLTOVF;
11709                     }
11710                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11711                         si_code = TARGET_FPE_FLTDIV;
11712                     }
11713                     if (fex & SWCR_TRAP_ENABLE_INV) {
11714                         si_code = TARGET_FPE_FLTINV;
11715                     }
11716 
11717                     info.si_signo = SIGFPE;
11718                     info.si_errno = 0;
11719                     info.si_code = si_code;
11720                     info._sifields._sigfault._addr = (cpu_env)->pc;
11721                     queue_signal(cpu_env, info.si_signo,
11722                                  QEMU_SI_FAULT, &info);
11723                 }
11724                 ret = 0;
11725             }
11726             break;
11727 
11728           /* case SSI_NVPAIRS:
11729              -- Used with SSIN_UACPROC to enable unaligned accesses.
11730              case SSI_IEEE_STATE_AT_SIGNAL:
11731              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11732              -- Not implemented in linux kernel
11733           */
11734         }
11735         return ret;
11736 #endif
11737 #ifdef TARGET_NR_osf_sigprocmask
11738     /* Alpha specific.  */
11739     case TARGET_NR_osf_sigprocmask:
11740         {
11741             abi_ulong mask;
11742             int how;
11743             sigset_t set, oldset;
11744 
11745             switch(arg1) {
11746             case TARGET_SIG_BLOCK:
11747                 how = SIG_BLOCK;
11748                 break;
11749             case TARGET_SIG_UNBLOCK:
11750                 how = SIG_UNBLOCK;
11751                 break;
11752             case TARGET_SIG_SETMASK:
11753                 how = SIG_SETMASK;
11754                 break;
11755             default:
11756                 return -TARGET_EINVAL;
11757             }
11758             mask = arg2;
11759             target_to_host_old_sigset(&set, &mask);
11760             ret = do_sigprocmask(how, &set, &oldset);
11761             if (!ret) {
11762                 host_to_target_old_sigset(&mask, &oldset);
11763                 ret = mask;
11764             }
11765         }
11766         return ret;
11767 #endif
11768 
11769 #ifdef TARGET_NR_getgid32
11770     case TARGET_NR_getgid32:
11771         return get_errno(getgid());
11772 #endif
11773 #ifdef TARGET_NR_geteuid32
11774     case TARGET_NR_geteuid32:
11775         return get_errno(geteuid());
11776 #endif
11777 #ifdef TARGET_NR_getegid32
11778     case TARGET_NR_getegid32:
11779         return get_errno(getegid());
11780 #endif
11781 #ifdef TARGET_NR_setreuid32
11782     case TARGET_NR_setreuid32:
11783         return get_errno(setreuid(arg1, arg2));
11784 #endif
11785 #ifdef TARGET_NR_setregid32
11786     case TARGET_NR_setregid32:
11787         return get_errno(setregid(arg1, arg2));
11788 #endif
11789 #ifdef TARGET_NR_getgroups32
11790     case TARGET_NR_getgroups32:
11791         {
11792             int gidsetsize = arg1;
11793             uint32_t *target_grouplist;
11794             gid_t *grouplist;
11795             int i;
11796 
11797             grouplist = alloca(gidsetsize * sizeof(gid_t));
11798             ret = get_errno(getgroups(gidsetsize, grouplist));
11799             if (gidsetsize == 0)
11800                 return ret;
11801             if (!is_error(ret)) {
11802                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11803                 if (!target_grouplist) {
11804                     return -TARGET_EFAULT;
11805                 }
11806                 for (i = 0; i < ret; i++)
11807                     target_grouplist[i] = tswap32(grouplist[i]);
11808                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11809             }
11810         }
11811         return ret;
11812 #endif
11813 #ifdef TARGET_NR_setgroups32
11814     case TARGET_NR_setgroups32:
11815         {
11816             int gidsetsize = arg1;
11817             uint32_t *target_grouplist;
11818             gid_t *grouplist;
11819             int i;
11820 
11821             grouplist = alloca(gidsetsize * sizeof(gid_t));
11822             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11823             if (!target_grouplist) {
11824                 return -TARGET_EFAULT;
11825             }
11826             for (i = 0; i < gidsetsize; i++)
11827                 grouplist[i] = tswap32(target_grouplist[i]);
11828             unlock_user(target_grouplist, arg2, 0);
11829             return get_errno(setgroups(gidsetsize, grouplist));
11830         }
11831 #endif
11832 #ifdef TARGET_NR_fchown32
11833     case TARGET_NR_fchown32:
11834         return get_errno(fchown(arg1, arg2, arg3));
11835 #endif
11836 #ifdef TARGET_NR_setresuid32
11837     case TARGET_NR_setresuid32:
11838         return get_errno(sys_setresuid(arg1, arg2, arg3));
11839 #endif
11840 #ifdef TARGET_NR_getresuid32
11841     case TARGET_NR_getresuid32:
11842         {
11843             uid_t ruid, euid, suid;
11844             ret = get_errno(getresuid(&ruid, &euid, &suid));
11845             if (!is_error(ret)) {
11846                 if (put_user_u32(ruid, arg1)
11847                     || put_user_u32(euid, arg2)
11848                     || put_user_u32(suid, arg3))
11849                     return -TARGET_EFAULT;
11850             }
11851         }
11852         return ret;
11853 #endif
11854 #ifdef TARGET_NR_setresgid32
11855     case TARGET_NR_setresgid32:
11856         return get_errno(sys_setresgid(arg1, arg2, arg3));
11857 #endif
11858 #ifdef TARGET_NR_getresgid32
11859     case TARGET_NR_getresgid32:
11860         {
11861             gid_t rgid, egid, sgid;
11862             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11863             if (!is_error(ret)) {
11864                 if (put_user_u32(rgid, arg1)
11865                     || put_user_u32(egid, arg2)
11866                     || put_user_u32(sgid, arg3))
11867                     return -TARGET_EFAULT;
11868             }
11869         }
11870         return ret;
11871 #endif
11872 #ifdef TARGET_NR_chown32
11873     case TARGET_NR_chown32:
11874         if (!(p = lock_user_string(arg1)))
11875             return -TARGET_EFAULT;
11876         ret = get_errno(chown(p, arg2, arg3));
11877         unlock_user(p, arg1, 0);
11878         return ret;
11879 #endif
11880 #ifdef TARGET_NR_setuid32
11881     case TARGET_NR_setuid32:
11882         return get_errno(sys_setuid(arg1));
11883 #endif
11884 #ifdef TARGET_NR_setgid32
11885     case TARGET_NR_setgid32:
11886         return get_errno(sys_setgid(arg1));
11887 #endif
11888 #ifdef TARGET_NR_setfsuid32
11889     case TARGET_NR_setfsuid32:
11890         return get_errno(setfsuid(arg1));
11891 #endif
11892 #ifdef TARGET_NR_setfsgid32
11893     case TARGET_NR_setfsgid32:
11894         return get_errno(setfsgid(arg1));
11895 #endif
11896 #ifdef TARGET_NR_mincore
11897     case TARGET_NR_mincore:
11898         {
11899             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11900             if (!a) {
11901                 return -TARGET_ENOMEM;
11902             }
11903             p = lock_user_string(arg3);
11904             if (!p) {
11905                 ret = -TARGET_EFAULT;
11906             } else {
11907                 ret = get_errno(mincore(a, arg2, p));
11908                 unlock_user(p, arg3, ret);
11909             }
11910             unlock_user(a, arg1, 0);
11911         }
11912         return ret;
11913 #endif
11914 #ifdef TARGET_NR_arm_fadvise64_64
11915     case TARGET_NR_arm_fadvise64_64:
11916         /* arm_fadvise64_64 looks like fadvise64_64 but
11917          * with different argument order: fd, advice, offset, len
11918          * rather than the usual fd, offset, len, advice.
11919          * Note that offset and len are both 64-bit so appear as
11920          * pairs of 32-bit registers.
11921          */
11922         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11923                             target_offset64(arg5, arg6), arg2);
11924         return -host_to_target_errno(ret);
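              /*
               * Illustrative arithmetic (hedged): target_offset64() combines
               * the two 32-bit register halves into one 64-bit value, roughly
               * ((uint64_t)high << 32) | low, with the order of the halves
               * depending on the guest's ABI/endianness; e.g. high=0x1,
               * low=0x2 yields 0x0000000100000002.
               */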
11925 #endif
11926 
11927 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11928 
11929 #ifdef TARGET_NR_fadvise64_64
11930     case TARGET_NR_fadvise64_64:
11931 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11932         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11933         ret = arg2;
11934         arg2 = arg3;
11935         arg3 = arg4;
11936         arg4 = arg5;
11937         arg5 = arg6;
11938         arg6 = ret;
11939 #else
11940         /* 6 args: fd, offset (high, low), len (high, low), advice */
11941         if (regpairs_aligned(cpu_env, num)) {
11942             /* offset is in (3,4), len in (5,6) and advice in 7 */
11943             arg2 = arg3;
11944             arg3 = arg4;
11945             arg4 = arg5;
11946             arg5 = arg6;
11947             arg6 = arg7;
11948         }
11949 #endif
11950         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11951                             target_offset64(arg4, arg5), arg6);
11952         return -host_to_target_errno(ret);
11953 #endif
11954 
11955 #ifdef TARGET_NR_fadvise64
11956     case TARGET_NR_fadvise64:
11957         /* 5 args: fd, offset (high, low), len, advice */
11958         if (regpairs_aligned(cpu_env, num)) {
11959             /* offset is in (3,4), len in 5 and advice in 6 */
11960             arg2 = arg3;
11961             arg3 = arg4;
11962             arg4 = arg5;
11963             arg5 = arg6;
11964         }
11965         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11966         return -host_to_target_errno(ret);
11967 #endif
11968 
11969 #else /* not a 32-bit ABI */
11970 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11971 #ifdef TARGET_NR_fadvise64_64
11972     case TARGET_NR_fadvise64_64:
11973 #endif
11974 #ifdef TARGET_NR_fadvise64
11975     case TARGET_NR_fadvise64:
11976 #endif
11977 #ifdef TARGET_S390X
11978         switch (arg4) {
11979         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11980         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11981         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11982         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11983         default: break;
11984         }
11985 #endif
11986         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11987 #endif
11988 #endif /* end of 64-bit ABI fadvise handling */
11989 
11990 #ifdef TARGET_NR_madvise
11991     case TARGET_NR_madvise:
11992         return target_madvise(arg1, arg2, arg3);
11993 #endif
11994 #ifdef TARGET_NR_fcntl64
11995     case TARGET_NR_fcntl64:
11996     {
11997         int cmd;
11998         struct flock64 fl;
11999         from_flock64_fn *copyfrom = copy_from_user_flock64;
12000         to_flock64_fn *copyto = copy_to_user_flock64;
12001 
12002 #ifdef TARGET_ARM
12003         if (!cpu_env->eabi) {
12004             copyfrom = copy_from_user_oabi_flock64;
12005             copyto = copy_to_user_oabi_flock64;
12006         }
12007 #endif
12008 
12009         cmd = target_to_host_fcntl_cmd(arg2);
12010         if (cmd == -TARGET_EINVAL) {
12011             return cmd;
12012         }
12013 
12014         switch(arg2) {
12015         case TARGET_F_GETLK64:
12016             ret = copyfrom(&fl, arg3);
12017             if (ret) {
12018                 break;
12019             }
12020             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12021             if (ret == 0) {
12022                 ret = copyto(arg3, &fl);
12023             }
12024             break;
12025 
12026         case TARGET_F_SETLK64:
12027         case TARGET_F_SETLKW64:
12028             ret = copyfrom(&fl, arg3);
12029             if (ret) {
12030                 break;
12031             }
12032             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12033             break;
12034         default:
12035             ret = do_fcntl(arg1, arg2, arg3);
12036             break;
12037         }
12038         return ret;
12039     }
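          /*
           * Note (hedged, inferred from the helper names above): ARM OABI
           * lays out struct flock64 without the extra alignment padding that
           * EABI inserts before its 64-bit members, so the guest structure
           * must be copied with the OABI-aware helpers when the CPU is not
           * running an EABI binary.
           */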
12040 #endif
12041 #ifdef TARGET_NR_cacheflush
12042     case TARGET_NR_cacheflush:
12043         /* self-modifying code is handled automatically, so nothing needed */
12044         return 0;
12045 #endif
12046 #ifdef TARGET_NR_getpagesize
12047     case TARGET_NR_getpagesize:
12048         return TARGET_PAGE_SIZE;
12049 #endif
12050     case TARGET_NR_gettid:
12051         return get_errno(sys_gettid());
12052 #ifdef TARGET_NR_readahead
12053     case TARGET_NR_readahead:
12054 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12055         if (regpairs_aligned(cpu_env, num)) {
12056             arg2 = arg3;
12057             arg3 = arg4;
12058             arg4 = arg5;
12059         }
12060         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12061 #else
12062         ret = get_errno(readahead(arg1, arg2, arg3));
12063 #endif
12064         return ret;
12065 #endif
12066 #ifdef CONFIG_ATTR
12067 #ifdef TARGET_NR_setxattr
12068     case TARGET_NR_listxattr:
12069     case TARGET_NR_llistxattr:
12070     {
12071         void *p, *b = 0;
12072         if (arg2) {
12073             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12074             if (!b) {
12075                 return -TARGET_EFAULT;
12076             }
12077         }
12078         p = lock_user_string(arg1);
12079         if (p) {
12080             if (num == TARGET_NR_listxattr) {
12081                 ret = get_errno(listxattr(p, b, arg3));
12082             } else {
12083                 ret = get_errno(llistxattr(p, b, arg3));
12084             }
12085         } else {
12086             ret = -TARGET_EFAULT;
12087         }
12088         unlock_user(p, arg1, 0);
12089         unlock_user(b, arg2, arg3);
12090         return ret;
12091     }
12092     case TARGET_NR_flistxattr:
12093     {
12094         void *b = 0;
12095         if (arg2) {
12096             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12097             if (!b) {
12098                 return -TARGET_EFAULT;
12099             }
12100         }
12101         ret = get_errno(flistxattr(arg1, b, arg3));
12102         unlock_user(b, arg2, arg3);
12103         return ret;
12104     }
12105     case TARGET_NR_setxattr:
12106     case TARGET_NR_lsetxattr:
12107         {
12108             void *p, *n, *v = 0;
12109             if (arg3) {
12110                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12111                 if (!v) {
12112                     return -TARGET_EFAULT;
12113                 }
12114             }
12115             p = lock_user_string(arg1);
12116             n = lock_user_string(arg2);
12117             if (p && n) {
12118                 if (num == TARGET_NR_setxattr) {
12119                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12120                 } else {
12121                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12122                 }
12123             } else {
12124                 ret = -TARGET_EFAULT;
12125             }
12126             unlock_user(p, arg1, 0);
12127             unlock_user(n, arg2, 0);
12128             unlock_user(v, arg3, 0);
12129         }
12130         return ret;
12131     case TARGET_NR_fsetxattr:
12132         {
12133             void *n, *v = 0;
12134             if (arg3) {
12135                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12136                 if (!v) {
12137                     return -TARGET_EFAULT;
12138                 }
12139             }
12140             n = lock_user_string(arg2);
12141             if (n) {
12142                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12143             } else {
12144                 ret = -TARGET_EFAULT;
12145             }
12146             unlock_user(n, arg2, 0);
12147             unlock_user(v, arg3, 0);
12148         }
12149         return ret;
12150     case TARGET_NR_getxattr:
12151     case TARGET_NR_lgetxattr:
12152         {
12153             void *p, *n, *v = 0;
12154             if (arg3) {
12155                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12156                 if (!v) {
12157                     return -TARGET_EFAULT;
12158                 }
12159             }
12160             p = lock_user_string(arg1);
12161             n = lock_user_string(arg2);
12162             if (p && n) {
12163                 if (num == TARGET_NR_getxattr) {
12164                     ret = get_errno(getxattr(p, n, v, arg4));
12165                 } else {
12166                     ret = get_errno(lgetxattr(p, n, v, arg4));
12167                 }
12168             } else {
12169                 ret = -TARGET_EFAULT;
12170             }
12171             unlock_user(p, arg1, 0);
12172             unlock_user(n, arg2, 0);
12173             unlock_user(v, arg3, arg4);
12174         }
12175         return ret;
12176     case TARGET_NR_fgetxattr:
12177         {
12178             void *n, *v = 0;
12179             if (arg3) {
12180                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12181                 if (!v) {
12182                     return -TARGET_EFAULT;
12183                 }
12184             }
12185             n = lock_user_string(arg2);
12186             if (n) {
12187                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12188             } else {
12189                 ret = -TARGET_EFAULT;
12190             }
12191             unlock_user(n, arg2, 0);
12192             unlock_user(v, arg3, arg4);
12193         }
12194         return ret;
12195     case TARGET_NR_removexattr:
12196     case TARGET_NR_lremovexattr:
12197         {
12198             void *p, *n;
12199             p = lock_user_string(arg1);
12200             n = lock_user_string(arg2);
12201             if (p && n) {
12202                 if (num == TARGET_NR_removexattr) {
12203                     ret = get_errno(removexattr(p, n));
12204                 } else {
12205                     ret = get_errno(lremovexattr(p, n));
12206                 }
12207             } else {
12208                 ret = -TARGET_EFAULT;
12209             }
12210             unlock_user(p, arg1, 0);
12211             unlock_user(n, arg2, 0);
12212         }
12213         return ret;
12214     case TARGET_NR_fremovexattr:
12215         {
12216             void *n;
12217             n = lock_user_string(arg2);
12218             if (n) {
12219                 ret = get_errno(fremovexattr(arg1, n));
12220             } else {
12221                 ret = -TARGET_EFAULT;
12222             }
12223             unlock_user(n, arg2, 0);
12224         }
12225         return ret;
12226 #endif
12227 #endif /* CONFIG_ATTR */
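    /*
     * All of the *xattr cases above follow one pattern: guest path and name
     * strings are pinned with lock_user_string(), the optional value/list
     * buffer with lock_user(), the corresponding host xattr syscall is called
     * directly, and unlock_user()'s final argument decides how many bytes are
     * copied back to guest memory (zero for read-only buffers).
     */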
12228 #ifdef TARGET_NR_set_thread_area
12229     case TARGET_NR_set_thread_area:
12230 #if defined(TARGET_MIPS)
12231       cpu_env->active_tc.CP0_UserLocal = arg1;
12232       return 0;
12233 #elif defined(TARGET_CRIS)
12234       if (arg1 & 0xff) {
12235           ret = -TARGET_EINVAL;
12236       } else {
12237           cpu_env->pregs[PR_PID] = arg1;
12238           ret = 0;
12239       }
12240       return ret;
12241 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12242       return do_set_thread_area(cpu_env, arg1);
12243 #elif defined(TARGET_M68K)
12244       {
12245           TaskState *ts = cpu->opaque;
12246           ts->tp_value = arg1;
12247           return 0;
12248       }
12249 #else
12250       return -TARGET_ENOSYS;
12251 #endif
12252 #endif
12253 #ifdef TARGET_NR_get_thread_area
12254     case TARGET_NR_get_thread_area:
12255 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12256         return do_get_thread_area(cpu_env, arg1);
12257 #elif defined(TARGET_M68K)
12258         {
12259             TaskState *ts = cpu->opaque;
12260             return ts->tp_value;
12261         }
12262 #else
12263         return -TARGET_ENOSYS;
12264 #endif
12265 #endif
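    /*
     * set_thread_area/get_thread_area store or fetch the TLS pointer in the
     * architecture-specific location: the MIPS UserLocal register, the CRIS
     * PID register, an i386 descriptor via do_set_thread_area(), or the m68k
     * per-task tp_value; everything else reports ENOSYS.
     */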
12266 #ifdef TARGET_NR_getdomainname
12267     case TARGET_NR_getdomainname:
12268         return -TARGET_ENOSYS;
12269 #endif
12270 
12271 #ifdef TARGET_NR_clock_settime
12272     case TARGET_NR_clock_settime:
12273     {
12274         struct timespec ts;
12275 
12276         ret = target_to_host_timespec(&ts, arg2);
12277         if (!is_error(ret)) {
12278             ret = get_errno(clock_settime(arg1, &ts));
12279         }
12280         return ret;
12281     }
12282 #endif
12283 #ifdef TARGET_NR_clock_settime64
12284     case TARGET_NR_clock_settime64:
12285     {
12286         struct timespec ts;
12287 
12288         ret = target_to_host_timespec64(&ts, arg2);
12289         if (!is_error(ret)) {
12290             ret = get_errno(clock_settime(arg1, &ts));
12291         }
12292         return ret;
12293     }
12294 #endif
12295 #ifdef TARGET_NR_clock_gettime
12296     case TARGET_NR_clock_gettime:
12297     {
12298         struct timespec ts;
12299         ret = get_errno(clock_gettime(arg1, &ts));
12300         if (!is_error(ret)) {
12301             ret = host_to_target_timespec(arg2, &ts);
12302         }
12303         return ret;
12304     }
12305 #endif
12306 #ifdef TARGET_NR_clock_gettime64
12307     case TARGET_NR_clock_gettime64:
12308     {
12309         struct timespec ts;
12310         ret = get_errno(clock_gettime(arg1, &ts));
12311         if (!is_error(ret)) {
12312             ret = host_to_target_timespec64(arg2, &ts);
12313         }
12314         return ret;
12315     }
12316 #endif
12317 #ifdef TARGET_NR_clock_getres
12318     case TARGET_NR_clock_getres:
12319     {
12320         struct timespec ts;
12321         ret = get_errno(clock_getres(arg1, &ts));
12322         if (!is_error(ret)) {
12323             host_to_target_timespec(arg2, &ts);
12324         }
12325         return ret;
12326     }
12327 #endif
12328 #ifdef TARGET_NR_clock_getres_time64
12329     case TARGET_NR_clock_getres_time64:
12330     {
12331         struct timespec ts;
12332         ret = get_errno(clock_getres(arg1, &ts));
12333         if (!is_error(ret)) {
12334             host_to_target_timespec64(arg2, &ts);
12335         }
12336         return ret;
12337     }
12338 #endif
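    /*
     * The clock_* cases convert between the guest timespec layout and the
     * host's struct timespec via target_to_host_timespec{,64}() and
     * host_to_target_timespec{,64}(); the *_time64 syscall numbers exist for
     * 32-bit guests that use a 64-bit time_t.
     */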
12339 #ifdef TARGET_NR_clock_nanosleep
12340     case TARGET_NR_clock_nanosleep:
12341     {
12342         struct timespec ts;
12343         if (target_to_host_timespec(&ts, arg3)) {
12344             return -TARGET_EFAULT;
12345         }
12346         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12347                                              &ts, arg4 ? &ts : NULL));
12348         /*
12349          * If the call is interrupted by a signal handler, it fails with
12350          * -TARGET_EINTR.  If arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12351          * the remaining unslept time is then copied back to arg4.
12352          */
12353         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12354             host_to_target_timespec(arg4, &ts)) {
12355               return -TARGET_EFAULT;
12356         }
12357 
12358         return ret;
12359     }
12360 #endif
12361 #ifdef TARGET_NR_clock_nanosleep_time64
12362     case TARGET_NR_clock_nanosleep_time64:
12363     {
12364         struct timespec ts;
12365 
12366         if (target_to_host_timespec64(&ts, arg3)) {
12367             return -TARGET_EFAULT;
12368         }
12369 
12370         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12371                                              &ts, arg4 ? &ts : NULL));
12372 
12373         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12374             host_to_target_timespec64(arg4, &ts)) {
12375             return -TARGET_EFAULT;
12376         }
12377         return ret;
12378     }
12379 #endif
12380 
12381 #if defined(TARGET_NR_set_tid_address)
12382     case TARGET_NR_set_tid_address:
12383     {
12384         TaskState *ts = cpu->opaque;
12385         ts->child_tidptr = arg1;
12386         /* do not call the host set_tid_address() syscall; instead return the tid */
12387         return get_errno(sys_gettid());
12388     }
12389 #endif
12390 
12391     case TARGET_NR_tkill:
12392         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12393 
12394     case TARGET_NR_tgkill:
12395         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12396                          target_to_host_signal(arg3)));
12397 
12398 #ifdef TARGET_NR_set_robust_list
12399     case TARGET_NR_set_robust_list:
12400     case TARGET_NR_get_robust_list:
12401         /* The ABI for supporting robust futexes has userspace pass
12402          * the kernel a pointer to a linked list which is updated by
12403          * userspace after the syscall; the list is walked by the kernel
12404          * when the thread exits. Since the linked list in QEMU guest
12405          * memory isn't a valid linked list for the host and we have
12406          * no way to reliably intercept the thread-death event, we can't
12407          * support these. Silently return ENOSYS so that guest userspace
12408          * falls back to a non-robust futex implementation (which should
12409          * be OK except in the corner case of the guest crashing while
12410          * holding a mutex that is shared with another process via
12411          * shared memory).
12412          */
12413         return -TARGET_ENOSYS;
12414 #endif
12415 
12416 #if defined(TARGET_NR_utimensat)
12417     case TARGET_NR_utimensat:
12418         {
12419             struct timespec *tsp, ts[2];
12420             if (!arg3) {
12421                 tsp = NULL;
12422             } else {
12423                 if (target_to_host_timespec(ts, arg3)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426                 if (target_to_host_timespec(ts + 1, arg3 +
12427                                             sizeof(struct target_timespec))) {
12428                     return -TARGET_EFAULT;
12429                 }
12430                 tsp = ts;
12431             }
12432             if (!arg2) {
12433                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12434             } else {
12435                 if (!(p = lock_user_string(arg2))) {
12436                     return -TARGET_EFAULT;
12437                 }
12438                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12439                 unlock_user(p, arg2, 0);
12440             }
12441         }
12442         return ret;
12443 #endif
12444 #ifdef TARGET_NR_utimensat_time64
12445     case TARGET_NR_utimensat_time64:
12446         {
12447             struct timespec *tsp, ts[2];
12448             if (!arg3) {
12449                 tsp = NULL;
12450             } else {
12451                 if (target_to_host_timespec64(ts, arg3)) {
12452                     return -TARGET_EFAULT;
12453                 }
12454                 if (target_to_host_timespec64(ts + 1, arg3 +
12455                                      sizeof(struct target__kernel_timespec))) {
12456                     return -TARGET_EFAULT;
12457                 }
12458                 tsp = ts;
12459             }
12460             if (!arg2) {
12461                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12462             } else {
12463                 p = lock_user_string(arg2);
12464                 if (!p) {
12465                     return -TARGET_EFAULT;
12466                 }
12467                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12468                 unlock_user(p, arg2, 0);
12469             }
12470         }
12471         return ret;
12472 #endif
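    /*
     * For utimensat a NULL times pointer (arg3 == 0) is forwarded as-is so
     * the host sets both timestamps to the current time; otherwise the two
     * guest timespecs are converted back-to-back, and the pathname is passed
     * through path() so the guest sysroot prefix (-L), if any, is applied.
     */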
12473 #ifdef TARGET_NR_futex
12474     case TARGET_NR_futex:
12475         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12476 #endif
12477 #ifdef TARGET_NR_futex_time64
12478     case TARGET_NR_futex_time64:
12479         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12480 #endif
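    /*
     * Futex handling lives in do_futex(); the boolean argument selects
     * whether a timeout at arg4 is decoded using the 64-bit time_t layout.
     */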
12481 #ifdef CONFIG_INOTIFY
12482 #if defined(TARGET_NR_inotify_init)
12483     case TARGET_NR_inotify_init:
12484         ret = get_errno(inotify_init());
12485         if (ret >= 0) {
12486             fd_trans_register(ret, &target_inotify_trans);
12487         }
12488         return ret;
12489 #endif
12490 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12491     case TARGET_NR_inotify_init1:
12492         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12493                                           fcntl_flags_tbl)));
12494         if (ret >= 0) {
12495             fd_trans_register(ret, &target_inotify_trans);
12496         }
12497         return ret;
12498 #endif
12499 #if defined(TARGET_NR_inotify_add_watch)
12500     case TARGET_NR_inotify_add_watch:
12501         p = lock_user_string(arg2);
12502         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12503         unlock_user(p, arg2, 0);
12504         return ret;
12505 #endif
12506 #if defined(TARGET_NR_inotify_rm_watch)
12507     case TARGET_NR_inotify_rm_watch:
12508         return get_errno(inotify_rm_watch(arg1, arg2));
12509 #endif
12510 #endif
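    /*
     * The fd_trans_register() calls above attach a translator to the new
     * descriptor so that inotify_event records read from it are converted to
     * the layout the guest expects.
     */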
12511 
12512 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12513     case TARGET_NR_mq_open:
12514         {
12515             struct mq_attr posix_mq_attr;
12516             struct mq_attr *pposix_mq_attr;
12517             int host_flags;
12518 
12519             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12520             pposix_mq_attr = NULL;
12521             if (arg4) {
12522                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12523                     return -TARGET_EFAULT;
12524                 }
12525                 pposix_mq_attr = &posix_mq_attr;
12526             }
12527             p = lock_user_string(arg1 - 1);
12528             if (!p) {
12529                 return -TARGET_EFAULT;
12530             }
12531             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12532             unlock_user(p, arg1, 0);
12533         }
12534         return ret;
12535 
12536     case TARGET_NR_mq_unlink:
12537         p = lock_user_string(arg1 - 1);
12538         if (!p) {
12539             return -TARGET_EFAULT;
12540         }
12541         ret = get_errno(mq_unlink(p));
12542         unlock_user(p, arg1, 0);
12543         return ret;
12544 
12545 #ifdef TARGET_NR_mq_timedsend
12546     case TARGET_NR_mq_timedsend:
12547         {
12548             struct timespec ts;
12549 
12550             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12551             if (arg5 != 0) {
12552                 if (target_to_host_timespec(&ts, arg5)) {
12553                     return -TARGET_EFAULT;
12554                 }
12555                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12556                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12557                     return -TARGET_EFAULT;
12558                 }
12559             } else {
12560                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12561             }
12562             unlock_user(p, arg2, arg3);
12563         }
12564         return ret;
12565 #endif
12566 #ifdef TARGET_NR_mq_timedsend_time64
12567     case TARGET_NR_mq_timedsend_time64:
12568         {
12569             struct timespec ts;
12570 
12571             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12572             if (arg5 != 0) {
12573                 if (target_to_host_timespec64(&ts, arg5)) {
12574                     return -TARGET_EFAULT;
12575                 }
12576                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12577                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12578                     return -TARGET_EFAULT;
12579                 }
12580             } else {
12581                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12582             }
12583             unlock_user(p, arg2, arg3);
12584         }
12585         return ret;
12586 #endif
12587 
12588 #ifdef TARGET_NR_mq_timedreceive
12589     case TARGET_NR_mq_timedreceive:
12590         {
12591             struct timespec ts;
12592             unsigned int prio;
12593 
12594             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12595             if (arg5 != 0) {
12596                 if (target_to_host_timespec(&ts, arg5)) {
12597                     return -TARGET_EFAULT;
12598                 }
12599                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12600                                                      &prio, &ts));
12601                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12602                     return -TARGET_EFAULT;
12603                 }
12604             } else {
12605                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12606                                                      &prio, NULL));
12607             }
12608             unlock_user(p, arg2, arg3);
12609             if (arg4 != 0)
12610                 put_user_u32(prio, arg4);
12611         }
12612         return ret;
12613 #endif
12614 #ifdef TARGET_NR_mq_timedreceive_time64
12615     case TARGET_NR_mq_timedreceive_time64:
12616         {
12617             struct timespec ts;
12618             unsigned int prio;
12619 
12620             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12621             if (arg5 != 0) {
12622                 if (target_to_host_timespec64(&ts, arg5)) {
12623                     return -TARGET_EFAULT;
12624                 }
12625                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12626                                                      &prio, &ts));
12627                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12628                     return -TARGET_EFAULT;
12629                 }
12630             } else {
12631                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12632                                                      &prio, NULL));
12633             }
12634             unlock_user(p, arg2, arg3);
12635             if (arg4 != 0) {
12636                 put_user_u32(prio, arg4);
12637             }
12638         }
12639         return ret;
12640 #endif
12641 
12642     /* Not implemented for now... */
12643 /*     case TARGET_NR_mq_notify: */
12644 /*         break; */
12645 
12646     case TARGET_NR_mq_getsetattr:
12647         {
12648             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12649             ret = 0;
12650             if (arg2 != 0) {
12651                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12652                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12653                                            &posix_mq_attr_out));
12654             } else if (arg3 != 0) {
12655                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12656             }
12657             if (ret == 0 && arg3 != 0) {
12658                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12659             }
12660         }
12661         return ret;
12662 #endif
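    /*
     * The POSIX message queue cases map almost directly onto the host mq_*
     * API: open flags go through target_to_host_bitmask(), struct mq_attr is
     * converted with copy_{from,to}_user_mq_attr(), and the timed
     * send/receive variants use the safe_mq_* wrappers so they can be
     * interrupted and restarted by guest signals like other blocking calls.
     */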
12663 
12664 #ifdef CONFIG_SPLICE
12665 #ifdef TARGET_NR_tee
12666     case TARGET_NR_tee:
12667         {
12668             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12669         }
12670         return ret;
12671 #endif
12672 #ifdef TARGET_NR_splice
12673     case TARGET_NR_splice:
12674         {
12675             loff_t loff_in, loff_out;
12676             loff_t *ploff_in = NULL, *ploff_out = NULL;
12677             if (arg2) {
12678                 if (get_user_u64(loff_in, arg2)) {
12679                     return -TARGET_EFAULT;
12680                 }
12681                 ploff_in = &loff_in;
12682             }
12683             if (arg4) {
12684                 if (get_user_u64(loff_out, arg4)) {
12685                     return -TARGET_EFAULT;
12686                 }
12687                 ploff_out = &loff_out;
12688             }
12689             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12690             if (arg2) {
12691                 if (put_user_u64(loff_in, arg2)) {
12692                     return -TARGET_EFAULT;
12693                 }
12694             }
12695             if (arg4) {
12696                 if (put_user_u64(loff_out, arg4)) {
12697                     return -TARGET_EFAULT;
12698                 }
12699             }
12700         }
12701         return ret;
12702 #endif
12703 #ifdef TARGET_NR_vmsplice
12704     case TARGET_NR_vmsplice:
12705         {
12706             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12707             if (vec != NULL) {
12708                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12709                 unlock_iovec(vec, arg2, arg3, 0);
12710             } else {
12711                 ret = -host_to_target_errno(errno);
12712             }
12713         }
12714         return ret;
12715 #endif
12716 #endif /* CONFIG_SPLICE */
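    /*
     * For splice the optional in/out offsets are copied from guest memory
     * into host loff_t variables and written back afterwards so the guest
     * sees the updated positions; vmsplice converts the guest iovec array
     * with lock_iovec()/unlock_iovec().
     */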
12717 #ifdef CONFIG_EVENTFD
12718 #if defined(TARGET_NR_eventfd)
12719     case TARGET_NR_eventfd:
12720         ret = get_errno(eventfd(arg1, 0));
12721         if (ret >= 0) {
12722             fd_trans_register(ret, &target_eventfd_trans);
12723         }
12724         return ret;
12725 #endif
12726 #if defined(TARGET_NR_eventfd2)
12727     case TARGET_NR_eventfd2:
12728     {
12729         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12730         if (arg2 & TARGET_O_NONBLOCK) {
12731             host_flags |= O_NONBLOCK;
12732         }
12733         if (arg2 & TARGET_O_CLOEXEC) {
12734             host_flags |= O_CLOEXEC;
12735         }
12736         ret = get_errno(eventfd(arg1, host_flags));
12737         if (ret >= 0) {
12738             fd_trans_register(ret, &target_eventfd_trans);
12739         }
12740         return ret;
12741     }
12742 #endif
12743 #endif /* CONFIG_EVENTFD  */
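    /*
     * eventfd2 translates the TARGET_O_NONBLOCK/TARGET_O_CLOEXEC bits to host
     * flag values by hand; both variants then register the descriptor with
     * fd_trans_register() so subsequent reads are converted for the guest.
     */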
12744 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12745     case TARGET_NR_fallocate:
12746 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12747         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12748                                   target_offset64(arg5, arg6)));
12749 #else
12750         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12751 #endif
12752         return ret;
12753 #endif
12754 #if defined(CONFIG_SYNC_FILE_RANGE)
12755 #if defined(TARGET_NR_sync_file_range)
12756     case TARGET_NR_sync_file_range:
12757 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12758 #if defined(TARGET_MIPS)
12759         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12760                                         target_offset64(arg5, arg6), arg7));
12761 #else
12762         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12763                                         target_offset64(arg4, arg5), arg6));
12764 #endif /* !TARGET_MIPS */
12765 #else
12766         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12767 #endif
12768         return ret;
12769 #endif
12770 #if defined(TARGET_NR_sync_file_range2) || \
12771     defined(TARGET_NR_arm_sync_file_range)
12772 #if defined(TARGET_NR_sync_file_range2)
12773     case TARGET_NR_sync_file_range2:
12774 #endif
12775 #if defined(TARGET_NR_arm_sync_file_range)
12776     case TARGET_NR_arm_sync_file_range:
12777 #endif
12778         /* This is like sync_file_range but the arguments are reordered */
12779 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12780         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12781                                         target_offset64(arg5, arg6), arg2));
12782 #else
12783         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12784 #endif
12785         return ret;
12786 #endif
12787 #endif
12788 #if defined(TARGET_NR_signalfd4)
12789     case TARGET_NR_signalfd4:
12790         return do_signalfd4(arg1, arg2, arg4);
12791 #endif
12792 #if defined(TARGET_NR_signalfd)
12793     case TARGET_NR_signalfd:
12794         return do_signalfd4(arg1, arg2, 0);
12795 #endif
12796 #if defined(CONFIG_EPOLL)
12797 #if defined(TARGET_NR_epoll_create)
12798     case TARGET_NR_epoll_create:
12799         return get_errno(epoll_create(arg1));
12800 #endif
12801 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12802     case TARGET_NR_epoll_create1:
12803         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12804 #endif
12805 #if defined(TARGET_NR_epoll_ctl)
12806     case TARGET_NR_epoll_ctl:
12807     {
12808         struct epoll_event ep;
12809         struct epoll_event *epp = 0;
12810         if (arg4) {
12811             if (arg2 != EPOLL_CTL_DEL) {
12812                 struct target_epoll_event *target_ep;
12813                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12814                     return -TARGET_EFAULT;
12815                 }
12816                 ep.events = tswap32(target_ep->events);
12817                 /*
12818                  * The epoll_data_t union is just opaque data to the kernel,
12819                  * so we transfer all 64 bits across and need not worry what
12820                  * actual data type it is.
12821                  */
12822                 ep.data.u64 = tswap64(target_ep->data.u64);
12823                 unlock_user_struct(target_ep, arg4, 0);
12824             }
12825             /*
12826              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12827              * non-null event pointer even though the argument is ignored,
12828              * so pass a pointer to the (possibly uninitialised) ep anyway.
12829              */
12830             epp = &ep;
12831         }
12832         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12833     }
12834 #endif
12835 
12836 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12837 #if defined(TARGET_NR_epoll_wait)
12838     case TARGET_NR_epoll_wait:
12839 #endif
12840 #if defined(TARGET_NR_epoll_pwait)
12841     case TARGET_NR_epoll_pwait:
12842 #endif
12843     {
12844         struct target_epoll_event *target_ep;
12845         struct epoll_event *ep;
12846         int epfd = arg1;
12847         int maxevents = arg3;
12848         int timeout = arg4;
12849 
12850         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12851             return -TARGET_EINVAL;
12852         }
12853 
12854         target_ep = lock_user(VERIFY_WRITE, arg2,
12855                               maxevents * sizeof(struct target_epoll_event), 1);
12856         if (!target_ep) {
12857             return -TARGET_EFAULT;
12858         }
12859 
12860         ep = g_try_new(struct epoll_event, maxevents);
12861         if (!ep) {
12862             unlock_user(target_ep, arg2, 0);
12863             return -TARGET_ENOMEM;
12864         }
12865 
12866         switch (num) {
12867 #if defined(TARGET_NR_epoll_pwait)
12868         case TARGET_NR_epoll_pwait:
12869         {
12870             sigset_t *set = NULL;
12871 
12872             if (arg5) {
12873                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12874                 if (ret != 0) {
12875                     break;
12876                 }
12877             }
12878 
12879             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12880                                              set, SIGSET_T_SIZE));
12881 
12882             if (set) {
12883                 finish_sigsuspend_mask(ret);
12884             }
12885             break;
12886         }
12887 #endif
12888 #if defined(TARGET_NR_epoll_wait)
12889         case TARGET_NR_epoll_wait:
12890             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12891                                              NULL, 0));
12892             break;
12893 #endif
12894         default:
12895             ret = -TARGET_ENOSYS;
12896         }
12897         if (!is_error(ret)) {
12898             int i;
12899             for (i = 0; i < ret; i++) {
12900                 target_ep[i].events = tswap32(ep[i].events);
12901                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12902             }
12903             unlock_user(target_ep, arg2,
12904                         ret * sizeof(struct target_epoll_event));
12905         } else {
12906             unlock_user(target_ep, arg2, 0);
12907         }
12908         g_free(ep);
12909         return ret;
12910     }
12911 #endif
12912 #endif
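    /*
     * epoll_wait/epoll_pwait bounce the results through a host-side
     * struct epoll_event array, byte-swapping the events and the opaque
     * 64-bit data field on the way back; epoll_pwait additionally converts
     * the guest signal mask via process_sigsuspend_mask() before calling the
     * safe_epoll_pwait() wrapper.
     */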
12913 #ifdef TARGET_NR_prlimit64
12914     case TARGET_NR_prlimit64:
12915     {
12916         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12917         struct target_rlimit64 *target_rnew, *target_rold;
12918         struct host_rlimit64 rnew, rold, *rnewp = 0;
12919         int resource = target_to_host_resource(arg2);
12920 
12921         if (arg3 && (resource != RLIMIT_AS &&
12922                      resource != RLIMIT_DATA &&
12923                      resource != RLIMIT_STACK)) {
12924             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12925                 return -TARGET_EFAULT;
12926             }
12927             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
12928             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
12929             unlock_user_struct(target_rnew, arg3, 0);
12930             rnewp = &rnew;
12931         }
12932 
12933         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12934         if (!is_error(ret) && arg4) {
12935             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12936                 return -TARGET_EFAULT;
12937             }
12938             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
12939             __put_user(rold.rlim_max, &target_rold->rlim_max);
12940             unlock_user_struct(target_rold, arg4, 1);
12941         }
12942         return ret;
12943     }
12944 #endif
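    /*
     * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
     * forwarded to the host (rnewp stays NULL for them), presumably so a
     * guest cannot shrink limits that QEMU itself relies on; the old limits
     * are still fetched and copied back.
     */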
12945 #ifdef TARGET_NR_gethostname
12946     case TARGET_NR_gethostname:
12947     {
12948         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12949         if (name) {
12950             ret = get_errno(gethostname(name, arg2));
12951             unlock_user(name, arg1, arg2);
12952         } else {
12953             ret = -TARGET_EFAULT;
12954         }
12955         return ret;
12956     }
12957 #endif
12958 #ifdef TARGET_NR_atomic_cmpxchg_32
12959     case TARGET_NR_atomic_cmpxchg_32:
12960     {
12961         /* should use start_exclusive from main.c */
12962         abi_ulong mem_value;
12963         if (get_user_u32(mem_value, arg6)) {
12964             target_siginfo_t info;
12965             info.si_signo = SIGSEGV;
12966             info.si_errno = 0;
12967             info.si_code = TARGET_SEGV_MAPERR;
12968             info._sifields._sigfault._addr = arg6;
12969             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12970             ret = 0xdeadbeef;
12971 
12972         }
12973         if (mem_value == arg2)
12974             put_user_u32(arg1, arg6);
12975         return mem_value;
12976     }
12977 #endif
12978 #ifdef TARGET_NR_atomic_barrier
12979     case TARGET_NR_atomic_barrier:
12980         /* Like the kernel implementation and the qemu arm barrier,
12981            treat this as a no-op. */
12982         return 0;
12983 #endif
12984 
12985 #ifdef TARGET_NR_timer_create
12986     case TARGET_NR_timer_create:
12987     {
12988         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12989 
12990         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12991 
12992         int clkid = arg1;
12993         int timer_index = next_free_host_timer();
12994 
12995         if (timer_index < 0) {
12996             ret = -TARGET_EAGAIN;
12997         } else {
12998             timer_t *phtimer = g_posix_timers + timer_index;
12999 
13000             if (arg2) {
13001                 phost_sevp = &host_sevp;
13002                 ret = target_to_host_sigevent(phost_sevp, arg2);
13003                 if (ret != 0) {
13004                     free_host_timer_slot(timer_index);
13005                     return ret;
13006                 }
13007             }
13008 
13009             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13010             if (ret) {
13011                 free_host_timer_slot(timer_index);
13012             } else {
13013                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13014                     timer_delete(*phtimer);
13015                     free_host_timer_slot(timer_index);
13016                     return -TARGET_EFAULT;
13017                 }
13018             }
13019         }
13020         return ret;
13021     }
13022 #endif
13023 
13024 #ifdef TARGET_NR_timer_settime
13025     case TARGET_NR_timer_settime:
13026     {
13027         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13028          * struct itimerspec *old_value */
13029         target_timer_t timerid = get_timer_id(arg1);
13030 
13031         if (timerid < 0) {
13032             ret = timerid;
13033         } else if (arg3 == 0) {
13034             ret = -TARGET_EINVAL;
13035         } else {
13036             timer_t htimer = g_posix_timers[timerid];
13037             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13038 
13039             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13040                 return -TARGET_EFAULT;
13041             }
13042             ret = get_errno(
13043                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13044             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13045                 return -TARGET_EFAULT;
13046             }
13047         }
13048         return ret;
13049     }
13050 #endif
13051 
13052 #ifdef TARGET_NR_timer_settime64
13053     case TARGET_NR_timer_settime64:
13054     {
13055         target_timer_t timerid = get_timer_id(arg1);
13056 
13057         if (timerid < 0) {
13058             ret = timerid;
13059         } else if (arg3 == 0) {
13060             ret = -TARGET_EINVAL;
13061         } else {
13062             timer_t htimer = g_posix_timers[timerid];
13063             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13064 
13065             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13066                 return -TARGET_EFAULT;
13067             }
13068             ret = get_errno(
13069                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13070             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13071                 return -TARGET_EFAULT;
13072             }
13073         }
13074         return ret;
13075     }
13076 #endif
13077 
13078 #ifdef TARGET_NR_timer_gettime
13079     case TARGET_NR_timer_gettime:
13080     {
13081         /* args: timer_t timerid, struct itimerspec *curr_value */
13082         target_timer_t timerid = get_timer_id(arg1);
13083 
13084         if (timerid < 0) {
13085             ret = timerid;
13086         } else if (!arg2) {
13087             ret = -TARGET_EFAULT;
13088         } else {
13089             timer_t htimer = g_posix_timers[timerid];
13090             struct itimerspec hspec;
13091             ret = get_errno(timer_gettime(htimer, &hspec));
13092 
13093             if (host_to_target_itimerspec(arg2, &hspec)) {
13094                 ret = -TARGET_EFAULT;
13095             }
13096         }
13097         return ret;
13098     }
13099 #endif
13100 
13101 #ifdef TARGET_NR_timer_gettime64
13102     case TARGET_NR_timer_gettime64:
13103     {
13104         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13105         target_timer_t timerid = get_timer_id(arg1);
13106 
13107         if (timerid < 0) {
13108             ret = timerid;
13109         } else if (!arg2) {
13110             ret = -TARGET_EFAULT;
13111         } else {
13112             timer_t htimer = g_posix_timers[timerid];
13113             struct itimerspec hspec;
13114             ret = get_errno(timer_gettime(htimer, &hspec));
13115 
13116             if (host_to_target_itimerspec64(arg2, &hspec)) {
13117                 ret = -TARGET_EFAULT;
13118             }
13119         }
13120         return ret;
13121     }
13122 #endif
13123 
13124 #ifdef TARGET_NR_timer_getoverrun
13125     case TARGET_NR_timer_getoverrun:
13126     {
13127         /* args: timer_t timerid */
13128         target_timer_t timerid = get_timer_id(arg1);
13129 
13130         if (timerid < 0) {
13131             ret = timerid;
13132         } else {
13133             timer_t htimer = g_posix_timers[timerid];
13134             ret = get_errno(timer_getoverrun(htimer));
13135         }
13136         return ret;
13137     }
13138 #endif
13139 
13140 #ifdef TARGET_NR_timer_delete
13141     case TARGET_NR_timer_delete:
13142     {
13143         /* args: timer_t timerid */
13144         target_timer_t timerid = get_timer_id(arg1);
13145 
13146         if (timerid < 0) {
13147             ret = timerid;
13148         } else {
13149             timer_t htimer = g_posix_timers[timerid];
13150             ret = get_errno(timer_delete(htimer));
13151             free_host_timer_slot(timerid);
13152         }
13153         return ret;
13154     }
13155 #endif
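    /*
     * Guest timer ids are encoded as TIMER_MAGIC | index into the
     * g_posix_timers[] slot table; get_timer_id() validates the id and
     * yields the index (or a negative error), and timer_delete releases the
     * slot again with free_host_timer_slot().
     */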
13156 
13157 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13158     case TARGET_NR_timerfd_create:
13159         ret = get_errno(timerfd_create(arg1,
13160                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13161         if (ret >= 0) {
13162             fd_trans_register(ret, &target_timerfd_trans);
13163         }
13164         return ret;
13165 #endif
13166 
13167 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13168     case TARGET_NR_timerfd_gettime:
13169         {
13170             struct itimerspec its_curr;
13171 
13172             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13173 
13174             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13175                 return -TARGET_EFAULT;
13176             }
13177         }
13178         return ret;
13179 #endif
13180 
13181 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13182     case TARGET_NR_timerfd_gettime64:
13183         {
13184             struct itimerspec its_curr;
13185 
13186             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13187 
13188             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13189                 return -TARGET_EFAULT;
13190             }
13191         }
13192         return ret;
13193 #endif
13194 
13195 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13196     case TARGET_NR_timerfd_settime:
13197         {
13198             struct itimerspec its_new, its_old, *p_new;
13199 
13200             if (arg3) {
13201                 if (target_to_host_itimerspec(&its_new, arg3)) {
13202                     return -TARGET_EFAULT;
13203                 }
13204                 p_new = &its_new;
13205             } else {
13206                 p_new = NULL;
13207             }
13208 
13209             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13210 
13211             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13212                 return -TARGET_EFAULT;
13213             }
13214         }
13215         return ret;
13216 #endif
13217 
13218 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13219     case TARGET_NR_timerfd_settime64:
13220         {
13221             struct itimerspec its_new, its_old, *p_new;
13222 
13223             if (arg3) {
13224                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13225                     return -TARGET_EFAULT;
13226                 }
13227                 p_new = &its_new;
13228             } else {
13229                 p_new = NULL;
13230             }
13231 
13232             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13233 
13234             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13235                 return -TARGET_EFAULT;
13236             }
13237         }
13238         return ret;
13239 #endif
13240 
13241 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13242     case TARGET_NR_ioprio_get:
13243         return get_errno(ioprio_get(arg1, arg2));
13244 #endif
13245 
13246 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13247     case TARGET_NR_ioprio_set:
13248         return get_errno(ioprio_set(arg1, arg2, arg3));
13249 #endif
13250 
13251 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13252     case TARGET_NR_setns:
13253         return get_errno(setns(arg1, arg2));
13254 #endif
13255 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13256     case TARGET_NR_unshare:
13257         return get_errno(unshare(arg1));
13258 #endif
13259 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13260     case TARGET_NR_kcmp:
13261         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13262 #endif
13263 #ifdef TARGET_NR_swapcontext
13264     case TARGET_NR_swapcontext:
13265         /* PowerPC specific.  */
13266         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13267 #endif
13268 #ifdef TARGET_NR_memfd_create
13269     case TARGET_NR_memfd_create:
13270         p = lock_user_string(arg1);
13271         if (!p) {
13272             return -TARGET_EFAULT;
13273         }
13274         ret = get_errno(memfd_create(p, arg2));
13275         fd_trans_unregister(ret);
13276         unlock_user(p, arg1, 0);
13277         return ret;
13278 #endif
13279 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13280     case TARGET_NR_membarrier:
13281         return get_errno(membarrier(arg1, arg2));
13282 #endif
13283 
13284 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13285     case TARGET_NR_copy_file_range:
13286         {
13287             loff_t inoff, outoff;
13288             loff_t *pinoff = NULL, *poutoff = NULL;
13289 
13290             if (arg2) {
13291                 if (get_user_u64(inoff, arg2)) {
13292                     return -TARGET_EFAULT;
13293                 }
13294                 pinoff = &inoff;
13295             }
13296             if (arg4) {
13297                 if (get_user_u64(outoff, arg4)) {
13298                     return -TARGET_EFAULT;
13299                 }
13300                 poutoff = &outoff;
13301             }
13302             /* Do not sign-extend the count parameter. */
13303             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13304                                                  (abi_ulong)arg5, arg6));
13305             if (!is_error(ret) && ret > 0) {
13306                 if (arg2) {
13307                     if (put_user_u64(inoff, arg2)) {
13308                         return -TARGET_EFAULT;
13309                     }
13310                 }
13311                 if (arg4) {
13312                     if (put_user_u64(outoff, arg4)) {
13313                         return -TARGET_EFAULT;
13314                     }
13315                 }
13316             }
13317         }
13318         return ret;
13319 #endif
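    /*
     * The in/out offsets are only written back to guest memory when the call
     * actually made progress (ret > 0), and the length is passed as an
     * abi_ulong so a 32-bit guest's count is not sign-extended.
     */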
13320 
13321 #if defined(TARGET_NR_pivot_root)
13322     case TARGET_NR_pivot_root:
13323         {
13324             void *p2;
13325             p = lock_user_string(arg1); /* new_root */
13326             p2 = lock_user_string(arg2); /* put_old */
13327             if (!p || !p2) {
13328                 ret = -TARGET_EFAULT;
13329             } else {
13330                 ret = get_errno(pivot_root(p, p2));
13331             }
13332             unlock_user(p2, arg2, 0);
13333             unlock_user(p, arg1, 0);
13334         }
13335         return ret;
13336 #endif
13337 
13338     default:
13339         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13340         return -TARGET_ENOSYS;
13341     }
13342     return ret;
13343 }
13344 
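/*
 * do_syscall() is the entry point used by the per-architecture cpu loops: it
 * records the syscall for tracing, prints -strace output when enabled,
 * dispatches to do_syscall1() above and then records the return value.  With
 * DEBUG_ERESTARTSYS defined, every syscall is first forced to restart once to
 * exercise the restart paths.
 */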
13345 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13346                     abi_long arg2, abi_long arg3, abi_long arg4,
13347                     abi_long arg5, abi_long arg6, abi_long arg7,
13348                     abi_long arg8)
13349 {
13350     CPUState *cpu = env_cpu(cpu_env);
13351     abi_long ret;
13352 
13353 #ifdef DEBUG_ERESTARTSYS
13354     /* Debug-only code for exercising the syscall-restart code paths
13355      * in the per-architecture cpu main loops: restart every syscall
13356      * the guest makes once before letting it through.
13357      */
13358     {
13359         static bool flag;
13360         flag = !flag;
13361         if (flag) {
13362             return -QEMU_ERESTARTSYS;
13363         }
13364     }
13365 #endif
13366 
13367     record_syscall_start(cpu, num, arg1,
13368                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13369 
13370     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13371         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13372     }
13373 
13374     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13375                       arg5, arg6, arg7, arg8);
13376 
13377     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13378         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13379                           arg3, arg4, arg5, arg6);
13380     }
13381 
13382     record_syscall_return(cpu, num, ret);
13383     return ret;
13384 }
13385