xref: /openbmc/qemu/linux-user/syscall.c (revision 86f04735)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
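
/*
 * Illustrative sketch (not part of the original file): how the masks
 * above can be consulted to classify a clone flag combination.  The
 * real validation happens later in do_fork(); the helper name below is
 * hypothetical and only clarifies the intent of the masks.
 */
#if 0
static int classify_clone_flags_example(unsigned int flags)
{
    if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
        /* Looks like pthread_create(): all of the thread flags present */
        return (flags & CLONE_INVALID_THREAD_FLAGS) ? -1 : 1;
    }
    if ((flags & CLONE_THREAD_FLAGS) == 0) {
        /* Looks like fork(): none of the thread flags present */
        return (flags & CLONE_INVALID_FORK_FLAGS) ? -1 : 0;
    }
    /* A partial set of the thread flags cannot be supported */
    return -1;
}
#endif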
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
260 
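/*
 * Illustrative expansion (not part of the original file): a later use
 * such as
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * defines a thin static wrapper roughly equivalent to
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 * i.e. each macro just forwards its arguments to the raw host syscall()
 * with the matching __NR_ number.
 */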
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we emulate getdents with getdents if the host has it.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
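
/*
 * Illustrative sketch (not part of the original file): how a
 * bitmask_transtbl like the one above is typically walked to translate
 * target open() flags into host flags.  The field names are assumed
 * from the initializer order (target_mask, target_bits, host_mask,
 * host_bits); QEMU's real translation helper lives elsewhere.
 */
#if 0
static int translate_open_flags_example(int target_flags)
{
    const bitmask_transtbl *e;
    int host_flags = 0;

    for (e = fcntl_flags_tbl; e->target_mask != 0; e++) {
        if ((target_flags & e->target_mask) == e->target_bits) {
            host_flags |= e->host_bits;
        }
    }
    return host_flags;
}
#endif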
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be the one used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
537 #endif
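
/*
 * Illustrative sketch (not part of the original file): the intended
 * usage of the slot helpers above.  timer_create() handling reserves a
 * slot with the atomic exchange, stores the host timer_t there, and
 * timer_delete() handling releases it again.  The caller below is
 * hypothetical; the real code derives the guest-visible timer id from
 * the slot index.
 */
#if 0
static int example_guest_timer_create(clockid_t clkid, struct sigevent *sev)
{
    int slot = next_free_host_timer();

    if (slot < 0) {
        return -TARGET_EAGAIN;          /* all GUEST_TIMER_MAX slots busy */
    }
    if (timer_create(clkid, sev, &g_posix_timers[slot]) < 0) {
        free_host_timer_slot(slot);
        return -host_to_target_errno(errno);
    }
    return slot;
}
#endif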
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
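
/*
 * Illustrative sketch (not part of the original file): how
 * check_zeroed_user() is typically used when the guest hands us a
 * versioned, growable struct whose size (usize) may exceed the size
 * this build knows about (ksize).  Return convention: 1 means the
 * extra tail is all zeroes, 0 means the guest set bits we do not
 * understand, negative is a guest memory fault.  The caller below is
 * hypothetical.
 */
#if 0
static abi_long example_check_growable(abi_ulong addr, uint32_t usize)
{
    int ret = check_zeroed_user(addr, sizeof(struct sched_attr), usize);

    if (ret < 0) {
        return ret;                     /* -TARGET_EFAULT */
    }
    if (ret == 0) {
        return -TARGET_E2BIG;           /* unknown trailing fields were set */
    }
    /* Safe to copy just the first sizeof(struct sched_attr) bytes. */
    return 0;
}
#endif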
598 
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  use the flock64 struct rather than unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
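
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention the comment above requires, i.e. always the 64-bit lock
 * constants and struct flock64, so that 64-bit offsets are used on both
 * 32-bit and 64-bit hosts.  Hypothetical example call:
 */
#if 0
static int example_query_lock(int fd)
{
    struct flock64 fl = {
        .l_type = F_RDLCK,
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,                     /* 0 means "to end of file" */
    };

    return safe_fcntl(fd, F_GETLK64, &fl);
}
#endif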
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
797 static abi_ulong target_brk;
798 static abi_ulong brk_page;
799 
800 void target_set_brk(abi_ulong new_brk)
801 {
802     target_brk = new_brk;
803     brk_page = HOST_PAGE_ALIGN(target_brk);
804 }
805 
806 /* do_brk() must return target values and target errnos. */
807 abi_long do_brk(abi_ulong brk_val)
808 {
809     abi_long mapped_addr;
810     abi_ulong new_alloc_size;
811     abi_ulong new_brk, new_host_brk_page;
812 
813     /* brk pointers are always untagged */
814 
815     /* return old brk value if brk_val unchanged or zero */
816     if (!brk_val || brk_val == target_brk) {
817         return target_brk;
818     }
819 
820     new_brk = TARGET_PAGE_ALIGN(brk_val);
821     new_host_brk_page = HOST_PAGE_ALIGN(brk_val);
822 
823     /* brk_val and old target_brk might be on the same page */
824     if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
825         if (brk_val > target_brk) {
826             /* empty remaining bytes in (possibly larger) host page */
827             memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
828         }
829         target_brk = brk_val;
830         return target_brk;
831     }
832 
833     /* Release heap if necessary */
834     if (new_brk < target_brk) {
835         /* empty remaining bytes in (possibly larger) host page */
836         memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);
837 
838         /* free unused host pages and set new brk_page */
839         target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
840         brk_page = new_host_brk_page;
841 
842         target_brk = brk_val;
843         return target_brk;
844     }
845 
846     /* We need to allocate more memory after the brk... Note that
847      * we don't use MAP_FIXED because that will map over the top of
848      * any existing mapping (like the one with the host libc or qemu
849      * itself); instead we treat "mapped but at wrong address" as
850      * a failure and unmap again.
851      */
852     new_alloc_size = new_host_brk_page - brk_page;
853     if (new_alloc_size) {
854         mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
855                                         PROT_READ|PROT_WRITE,
856                                         MAP_ANON|MAP_PRIVATE, 0, 0));
857     } else {
858         mapped_addr = brk_page;
859     }
860 
861     if (mapped_addr == brk_page) {
862         /* Heap contents are initialized to zero, as for anonymous
863          * mapped pages.  Technically the new pages are already
864          * initialized to zero since they *are* anonymous mapped
865          * pages; however, we have to take care with the contents that
866          * come from the remaining part of the previous page: it may
867          * contain garbage data due to previous heap usage (grown and
868          * then shrunk).  */
869         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
870 
871         target_brk = brk_val;
872         brk_page = new_host_brk_page;
873         return target_brk;
874     } else if (mapped_addr != -1) {
875         /* Mapped but at wrong address, meaning there wasn't actually
876          * enough space for this brk.
877          */
878         target_munmap(mapped_addr, new_alloc_size);
879         mapped_addr = -1;
880     }
881 
882 #if defined(TARGET_ALPHA)
883     /* We (partially) emulate OSF/1 on Alpha, which requires we
884        return a proper errno, not an unchanged brk value.  */
885     return -TARGET_ENOMEM;
886 #endif
887     /* For everything else, return the previous break. */
888     return target_brk;
889 }
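
/*
 * Illustrative example (not part of the original file): with 4 KiB
 * target pages and 64 KiB host pages, a brk_val of 0x12345 gives
 * new_brk = 0x13000 (TARGET_PAGE_ALIGN) and new_host_brk_page = 0x20000
 * (HOST_PAGE_ALIGN).  Whole host pages are mapped and unmapped, while
 * target_brk itself tracks the exact byte the guest asked for.
 */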
890 
891 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
892     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
893 static inline abi_long copy_from_user_fdset(fd_set *fds,
894                                             abi_ulong target_fds_addr,
895                                             int n)
896 {
897     int i, nw, j, k;
898     abi_ulong b, *target_fds;
899 
900     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
901     if (!(target_fds = lock_user(VERIFY_READ,
902                                  target_fds_addr,
903                                  sizeof(abi_ulong) * nw,
904                                  1)))
905         return -TARGET_EFAULT;
906 
907     FD_ZERO(fds);
908     k = 0;
909     for (i = 0; i < nw; i++) {
910         /* grab the abi_ulong */
911         __get_user(b, &target_fds[i]);
912         for (j = 0; j < TARGET_ABI_BITS; j++) {
913             /* check the bit inside the abi_ulong */
914             if ((b >> j) & 1)
915                 FD_SET(k, fds);
916             k++;
917         }
918     }
919 
920     unlock_user(target_fds, target_fds_addr, 0);
921 
922     return 0;
923 }
924 
925 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
926                                                  abi_ulong target_fds_addr,
927                                                  int n)
928 {
929     if (target_fds_addr) {
930         if (copy_from_user_fdset(fds, target_fds_addr, n))
931             return -TARGET_EFAULT;
932         *fds_ptr = fds;
933     } else {
934         *fds_ptr = NULL;
935     }
936     return 0;
937 }
938 
939 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
940                                           const fd_set *fds,
941                                           int n)
942 {
943     int i, nw, j, k;
944     abi_long v;
945     abi_ulong *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_WRITE,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  0)))
952         return -TARGET_EFAULT;
953 
954     k = 0;
955     for (i = 0; i < nw; i++) {
956         v = 0;
957         for (j = 0; j < TARGET_ABI_BITS; j++) {
958             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
959             k++;
960         }
961         __put_user(v, &target_fds[i]);
962     }
963 
964     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
965 
966     return 0;
967 }
968 #endif
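
/*
 * Illustrative example (not part of the original file): with
 * TARGET_ABI_BITS == 32, guest fd 35 lives in abi_ulong word 1, bit 3
 * of the guest bitmap, which the loops above map to FD_SET(35, fds) on
 * the host and back again.
 */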
969 
970 #if defined(__alpha__)
971 #define HOST_HZ 1024
972 #else
973 #define HOST_HZ 100
974 #endif
975 
976 static inline abi_long host_to_target_clock_t(long ticks)
977 {
978 #if HOST_HZ == TARGET_HZ
979     return ticks;
980 #else
981     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
982 #endif
983 }
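
/*
 * Illustrative example (not part of the original file): on an Alpha
 * host (HOST_HZ == 1024) emulating a 100 Hz target, 2048 host ticks
 * convert to 2048 * 100 / 1024 = 200 target ticks; when HOST_HZ and
 * TARGET_HZ match, the value passes through unchanged.
 */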
984 
985 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
986                                              const struct rusage *rusage)
987 {
988     struct target_rusage *target_rusage;
989 
990     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
991         return -TARGET_EFAULT;
992     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
993     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
994     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
995     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
996     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
997     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
998     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
999     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1000     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1001     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1002     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1003     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1004     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1005     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1006     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1007     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1008     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1009     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1010     unlock_user_struct(target_rusage, target_addr, 1);
1011 
1012     return 0;
1013 }
1014 
1015 #ifdef TARGET_NR_setrlimit
1016 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1017 {
1018     abi_ulong target_rlim_swap;
1019     rlim_t result;
1020 
1021     target_rlim_swap = tswapal(target_rlim);
1022     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1023         return RLIM_INFINITY;
1024 
1025     result = target_rlim_swap;
1026     if (target_rlim_swap != (rlim_t)result)
1027         return RLIM_INFINITY;
1028 
1029     return result;
1030 }
1031 #endif
1032 
1033 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1034 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1035 {
1036     abi_ulong target_rlim_swap;
1037     abi_ulong result;
1038 
1039     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1040         target_rlim_swap = TARGET_RLIM_INFINITY;
1041     else
1042         target_rlim_swap = rlim;
1043     result = tswapal(target_rlim_swap);
1044 
1045     return result;
1046 }
1047 #endif
1048 
1049 static inline int target_to_host_resource(int code)
1050 {
1051     switch (code) {
1052     case TARGET_RLIMIT_AS:
1053         return RLIMIT_AS;
1054     case TARGET_RLIMIT_CORE:
1055         return RLIMIT_CORE;
1056     case TARGET_RLIMIT_CPU:
1057         return RLIMIT_CPU;
1058     case TARGET_RLIMIT_DATA:
1059         return RLIMIT_DATA;
1060     case TARGET_RLIMIT_FSIZE:
1061         return RLIMIT_FSIZE;
1062     case TARGET_RLIMIT_LOCKS:
1063         return RLIMIT_LOCKS;
1064     case TARGET_RLIMIT_MEMLOCK:
1065         return RLIMIT_MEMLOCK;
1066     case TARGET_RLIMIT_MSGQUEUE:
1067         return RLIMIT_MSGQUEUE;
1068     case TARGET_RLIMIT_NICE:
1069         return RLIMIT_NICE;
1070     case TARGET_RLIMIT_NOFILE:
1071         return RLIMIT_NOFILE;
1072     case TARGET_RLIMIT_NPROC:
1073         return RLIMIT_NPROC;
1074     case TARGET_RLIMIT_RSS:
1075         return RLIMIT_RSS;
1076     case TARGET_RLIMIT_RTPRIO:
1077         return RLIMIT_RTPRIO;
1078 #ifdef RLIMIT_RTTIME
1079     case TARGET_RLIMIT_RTTIME:
1080         return RLIMIT_RTTIME;
1081 #endif
1082     case TARGET_RLIMIT_SIGPENDING:
1083         return RLIMIT_SIGPENDING;
1084     case TARGET_RLIMIT_STACK:
1085         return RLIMIT_STACK;
1086     default:
1087         return code;
1088     }
1089 }
1090 
1091 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1092                                               abi_ulong target_tv_addr)
1093 {
1094     struct target_timeval *target_tv;
1095 
1096     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1097         return -TARGET_EFAULT;
1098     }
1099 
1100     __get_user(tv->tv_sec, &target_tv->tv_sec);
1101     __get_user(tv->tv_usec, &target_tv->tv_usec);
1102 
1103     unlock_user_struct(target_tv, target_tv_addr, 0);
1104 
1105     return 0;
1106 }
1107 
1108 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1109                                             const struct timeval *tv)
1110 {
1111     struct target_timeval *target_tv;
1112 
1113     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1114         return -TARGET_EFAULT;
1115     }
1116 
1117     __put_user(tv->tv_sec, &target_tv->tv_sec);
1118     __put_user(tv->tv_usec, &target_tv->tv_usec);
1119 
1120     unlock_user_struct(target_tv, target_tv_addr, 1);
1121 
1122     return 0;
1123 }
1124 
1125 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1126 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1127                                                 abi_ulong target_tv_addr)
1128 {
1129     struct target__kernel_sock_timeval *target_tv;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134 
1135     __get_user(tv->tv_sec, &target_tv->tv_sec);
1136     __get_user(tv->tv_usec, &target_tv->tv_usec);
1137 
1138     unlock_user_struct(target_tv, target_tv_addr, 0);
1139 
1140     return 0;
1141 }
1142 #endif
1143 
1144 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1145                                               const struct timeval *tv)
1146 {
1147     struct target__kernel_sock_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __put_user(tv->tv_sec, &target_tv->tv_sec);
1154     __put_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 1);
1157 
1158     return 0;
1159 }
1160 
1161 #if defined(TARGET_NR_futex) || \
1162     defined(TARGET_NR_rt_sigtimedwait) || \
1163     defined(TARGET_NR_pselect6) || \
1164     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1165     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1166     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1167     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1168     defined(TARGET_NR_timer_settime) || \
1169     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1170 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1171                                                abi_ulong target_addr)
1172 {
1173     struct target_timespec *target_ts;
1174 
1175     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1176         return -TARGET_EFAULT;
1177     }
1178     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1179     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1180     unlock_user_struct(target_ts, target_addr, 0);
1181     return 0;
1182 }
1183 #endif
1184 
1185 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1186     defined(TARGET_NR_timer_settime64) || \
1187     defined(TARGET_NR_mq_timedsend_time64) || \
1188     defined(TARGET_NR_mq_timedreceive_time64) || \
1189     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1190     defined(TARGET_NR_clock_nanosleep_time64) || \
1191     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1192     defined(TARGET_NR_utimensat) || \
1193     defined(TARGET_NR_utimensat_time64) || \
1194     defined(TARGET_NR_semtimedop_time64) || \
1195     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1196 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1197                                                  abi_ulong target_addr)
1198 {
1199     struct target__kernel_timespec *target_ts;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1202         return -TARGET_EFAULT;
1203     }
1204     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1205     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1206     /* in 32-bit mode, this drops the padding */
1207     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1208     unlock_user_struct(target_ts, target_addr, 0);
1209     return 0;
1210 }
1211 #endif
1212 
1213 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1214                                                struct timespec *host_ts)
1215 {
1216     struct target_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     unlock_user_struct(target_ts, target_addr, 1);
1224     return 0;
1225 }
1226 
1227 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1228                                                  struct timespec *host_ts)
1229 {
1230     struct target__kernel_timespec *target_ts;
1231 
1232     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1233         return -TARGET_EFAULT;
1234     }
1235     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1236     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1237     unlock_user_struct(target_ts, target_addr, 1);
1238     return 0;
1239 }
1240 
1241 #if defined(TARGET_NR_gettimeofday)
1242 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1243                                              struct timezone *tz)
1244 {
1245     struct target_timezone *target_tz;
1246 
1247     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1248         return -TARGET_EFAULT;
1249     }
1250 
1251     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1252     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1253 
1254     unlock_user_struct(target_tz, target_tz_addr, 1);
1255 
1256     return 0;
1257 }
1258 #endif
1259 
1260 #if defined(TARGET_NR_settimeofday)
1261 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1262                                                abi_ulong target_tz_addr)
1263 {
1264     struct target_timezone *target_tz;
1265 
1266     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1267         return -TARGET_EFAULT;
1268     }
1269 
1270     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1271     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1272 
1273     unlock_user_struct(target_tz, target_tz_addr, 0);
1274 
1275     return 0;
1276 }
1277 #endif
1278 
1279 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1280 #include <mqueue.h>
1281 
1282 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1283                                               abi_ulong target_mq_attr_addr)
1284 {
1285     struct target_mq_attr *target_mq_attr;
1286 
1287     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1288                           target_mq_attr_addr, 1))
1289         return -TARGET_EFAULT;
1290 
1291     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1292     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1293     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1294     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1295 
1296     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1297 
1298     return 0;
1299 }
1300 
1301 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1302                                             const struct mq_attr *attr)
1303 {
1304     struct target_mq_attr *target_mq_attr;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1307                           target_mq_attr_addr, 0))
1308         return -TARGET_EFAULT;
1309 
1310     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1311     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1312     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1313     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1314 
1315     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1316 
1317     return 0;
1318 }
1319 #endif
1320 
1321 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1322 /* do_select() must return target values and target errnos. */
1323 static abi_long do_select(int n,
1324                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1325                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1326 {
1327     fd_set rfds, wfds, efds;
1328     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1329     struct timeval tv;
1330     struct timespec ts, *ts_ptr;
1331     abi_long ret;
1332 
1333     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1334     if (ret) {
1335         return ret;
1336     }
1337     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345 
1346     if (target_tv_addr) {
1347         if (copy_from_user_timeval(&tv, target_tv_addr))
1348             return -TARGET_EFAULT;
1349         ts.tv_sec = tv.tv_sec;
1350         ts.tv_nsec = tv.tv_usec * 1000;
1351         ts_ptr = &ts;
1352     } else {
1353         ts_ptr = NULL;
1354     }
1355 
1356     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1357                                   ts_ptr, NULL));
1358 
1359     if (!is_error(ret)) {
1360         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1361             return -TARGET_EFAULT;
1362         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1363             return -TARGET_EFAULT;
1364         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1365             return -TARGET_EFAULT;
1366 
1367         if (target_tv_addr) {
1368             tv.tv_sec = ts.tv_sec;
1369             tv.tv_usec = ts.tv_nsec / 1000;
1370             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1371                 return -TARGET_EFAULT;
1372             }
1373         }
1374     }
1375 
1376     return ret;
1377 }
1378 
1379 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1380 static abi_long do_old_select(abi_ulong arg1)
1381 {
1382     struct target_sel_arg_struct *sel;
1383     abi_ulong inp, outp, exp, tvp;
1384     long nsel;
1385 
1386     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1387         return -TARGET_EFAULT;
1388     }
1389 
1390     nsel = tswapal(sel->n);
1391     inp = tswapal(sel->inp);
1392     outp = tswapal(sel->outp);
1393     exp = tswapal(sel->exp);
1394     tvp = tswapal(sel->tvp);
1395 
1396     unlock_user_struct(sel, arg1, 0);
1397 
1398     return do_select(nsel, inp, outp, exp, tvp);
1399 }
1400 #endif
1401 #endif
1402 
1403 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1404 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1405                             abi_long arg4, abi_long arg5, abi_long arg6,
1406                             bool time64)
1407 {
1408     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1409     fd_set rfds, wfds, efds;
1410     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1411     struct timespec ts, *ts_ptr;
1412     abi_long ret;
1413 
1414     /*
1415      * The 6th arg is actually two args smashed together,
1416      * so we cannot use the C library.
1417      */
1418     struct {
1419         sigset_t *set;
1420         size_t size;
1421     } sig, *sig_ptr;
1422 
1423     abi_ulong arg_sigset, arg_sigsize, *arg7;
1424 
1425     n = arg1;
1426     rfd_addr = arg2;
1427     wfd_addr = arg3;
1428     efd_addr = arg4;
1429     ts_addr = arg5;
1430 
1431     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1432     if (ret) {
1433         return ret;
1434     }
1435     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1436     if (ret) {
1437         return ret;
1438     }
1439     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1440     if (ret) {
1441         return ret;
1442     }
1443 
1444     /*
1445      * This takes a timespec, and not a timeval, so we cannot
1446      * use the do_select() helper ...
1447      */
1448     if (ts_addr) {
1449         if (time64) {
1450             if (target_to_host_timespec64(&ts, ts_addr)) {
1451                 return -TARGET_EFAULT;
1452             }
1453         } else {
1454             if (target_to_host_timespec(&ts, ts_addr)) {
1455                 return -TARGET_EFAULT;
1456             }
1457         }
1458         ts_ptr = &ts;
1459     } else {
1460         ts_ptr = NULL;
1461     }
1462 
1463     /* Extract the two packed args for the sigset */
1464     sig_ptr = NULL;
1465     if (arg6) {
1466         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1467         if (!arg7) {
1468             return -TARGET_EFAULT;
1469         }
1470         arg_sigset = tswapal(arg7[0]);
1471         arg_sigsize = tswapal(arg7[1]);
1472         unlock_user(arg7, arg6, 0);
1473 
1474         if (arg_sigset) {
1475             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1476             if (ret != 0) {
1477                 return ret;
1478             }
1479             sig_ptr = &sig;
1480             sig.size = SIGSET_T_SIZE;
1481         }
1482     }
1483 
1484     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1485                                   ts_ptr, sig_ptr));
1486 
1487     if (sig_ptr) {
1488         finish_sigsuspend_mask(ret);
1489     }
1490 
1491     if (!is_error(ret)) {
1492         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1493             return -TARGET_EFAULT;
1494         }
1495         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1496             return -TARGET_EFAULT;
1497         }
1498         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (time64) {
1502             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1503                 return -TARGET_EFAULT;
1504             }
1505         } else {
1506             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1507                 return -TARGET_EFAULT;
1508             }
1509         }
1510     }
1511     return ret;
1512 }
1513 #endif
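
/*
 * Illustrative note (not part of the original file): the packed 6th
 * argument decoded above follows the kernel's layout
 *     struct {
 *         const sigset_t *ss;     guest pointer to the signal mask
 *         size_t ss_len;          size of that mask in bytes
 *     };
 * which is why two abi_ulongs are read from arg6 and byte-swapped
 * individually instead of passing arg6 straight through.
 */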
1514 
1515 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1516     defined(TARGET_NR_ppoll_time64)
1517 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1518                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1519 {
1520     struct target_pollfd *target_pfd;
1521     unsigned int nfds = arg2;
1522     struct pollfd *pfd;
1523     unsigned int i;
1524     abi_long ret;
1525 
1526     pfd = NULL;
1527     target_pfd = NULL;
1528     if (nfds) {
1529         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1530             return -TARGET_EINVAL;
1531         }
1532         target_pfd = lock_user(VERIFY_WRITE, arg1,
1533                                sizeof(struct target_pollfd) * nfds, 1);
1534         if (!target_pfd) {
1535             return -TARGET_EFAULT;
1536         }
1537 
1538         pfd = alloca(sizeof(struct pollfd) * nfds);
1539         for (i = 0; i < nfds; i++) {
1540             pfd[i].fd = tswap32(target_pfd[i].fd);
1541             pfd[i].events = tswap16(target_pfd[i].events);
1542         }
1543     }
1544     if (ppoll) {
1545         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1546         sigset_t *set = NULL;
1547 
1548         if (arg3) {
1549             if (time64) {
1550                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1551                     unlock_user(target_pfd, arg1, 0);
1552                     return -TARGET_EFAULT;
1553                 }
1554             } else {
1555                 if (target_to_host_timespec(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             }
1560         } else {
1561             timeout_ts = NULL;
1562         }
1563 
1564         if (arg4) {
1565             ret = process_sigsuspend_mask(&set, arg4, arg5);
1566             if (ret != 0) {
1567                 unlock_user(target_pfd, arg1, 0);
1568                 return ret;
1569             }
1570         }
1571 
1572         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1573                                    set, SIGSET_T_SIZE));
1574 
1575         if (set) {
1576             finish_sigsuspend_mask(ret);
1577         }
1578         if (!is_error(ret) && arg3) {
1579             if (time64) {
1580                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1581                     return -TARGET_EFAULT;
1582                 }
1583             } else {
1584                 if (host_to_target_timespec(arg3, timeout_ts)) {
1585                     return -TARGET_EFAULT;
1586                 }
1587             }
1588         }
1589     } else {
1590         struct timespec ts, *pts;
1591 
1592         if (arg3 >= 0) {
1593             /* Convert milliseconds to seconds and nanoseconds */
1594             ts.tv_sec = arg3 / 1000;
1595             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1596             pts = &ts;
1597         } else {
1598             /* A negative poll() timeout means "infinite" */
1599             pts = NULL;
1600         }
1601         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1602     }
1603 
1604     if (!is_error(ret)) {
1605         for (i = 0; i < nfds; i++) {
1606             target_pfd[i].revents = tswap16(pfd[i].revents);
1607         }
1608     }
1609     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1610     return ret;
1611 }
1612 #endif
1613 
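/*
 * Helper for pipe() and pipe2().  pipe2() is always used on the host; for
 * the plain pipe() syscall some targets return the second file descriptor
 * in a register instead of writing both descriptors into the guest array
 * (see the per-target cases below).
 */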
1614 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1615                         int flags, int is_pipe2)
1616 {
1617     int host_pipe[2];
1618     abi_long ret;
1619     ret = pipe2(host_pipe, flags);
1620 
1621     if (is_error(ret))
1622         return get_errno(ret);
1623 
1624     /* Several targets have special calling conventions for the original
1625        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1626     if (!is_pipe2) {
1627 #if defined(TARGET_ALPHA)
1628         cpu_env->ir[IR_A4] = host_pipe[1];
1629         return host_pipe[0];
1630 #elif defined(TARGET_MIPS)
1631         cpu_env->active_tc.gpr[3] = host_pipe[1];
1632         return host_pipe[0];
1633 #elif defined(TARGET_SH4)
1634         cpu_env->gregs[1] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_SPARC)
1637         cpu_env->regwptr[1] = host_pipe[1];
1638         return host_pipe[0];
1639 #endif
1640     }
1641 
1642     if (put_user_s32(host_pipe[0], pipedes)
1643         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1644         return -TARGET_EFAULT;
1645     return get_errno(ret);
1646 }
1647 
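/*
 * Convert a guest ip_mreq/ip_mreqn multicast request into the host
 * ip_mreqn.  The interface index is only present (and byte-swapped) when
 * the caller passed a full ip_mreqn structure.
 */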
1648 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1649                                               abi_ulong target_addr,
1650                                               socklen_t len)
1651 {
1652     struct target_ip_mreqn *target_smreqn;
1653 
1654     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1655     if (!target_smreqn)
1656         return -TARGET_EFAULT;
1657     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1658     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1659     if (len == sizeof(struct target_ip_mreqn))
1660         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1661     unlock_user(target_smreqn, target_addr, 0);
1662 
1663     return 0;
1664 }
1665 
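/*
 * Copy a guest sockaddr into a host sockaddr, fixing up the byte order of
 * the family field and of the AF_NETLINK/AF_PACKET members.  For AF_UNIX
 * the length is adjusted so that sun_path is NUL-terminated, and capped
 * at sizeof(struct sockaddr_un).
 */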
1666 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1667                                                abi_ulong target_addr,
1668                                                socklen_t len)
1669 {
1670     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1671     sa_family_t sa_family;
1672     struct target_sockaddr *target_saddr;
1673 
1674     if (fd_trans_target_to_host_addr(fd)) {
1675         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1676     }
1677 
1678     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1679     if (!target_saddr)
1680         return -TARGET_EFAULT;
1681 
1682     sa_family = tswap16(target_saddr->sa_family);
1683 
1684     /* Oops. The caller might send an incomplete sun_path; sun_path
1685      * must be terminated by \0 (see the manual page), but
1686      * unfortunately it is quite common to specify the sockaddr_un
1687      * length as "strlen(x->sun_path)" when it should be
1688      * "strlen(...) + 1". We'll fix that here if needed.
1689      * The Linux kernel has a similar workaround.
1690      */
1691 
1692     if (sa_family == AF_UNIX) {
1693         if (len < unix_maxlen && len > 0) {
1694             char *cp = (char *)target_saddr;
1695 
1696             if (cp[len - 1] && !cp[len])
1697                 len++;
1698         }
1699         if (len > unix_maxlen)
1700             len = unix_maxlen;
1701     }
1702 
1703     memcpy(addr, target_saddr, len);
1704     addr->sa_family = sa_family;
1705     if (sa_family == AF_NETLINK) {
1706         struct sockaddr_nl *nladdr;
1707 
1708         nladdr = (struct sockaddr_nl *)addr;
1709         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1710         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1711     } else if (sa_family == AF_PACKET) {
1712         struct target_sockaddr_ll *lladdr;
1713 
1714         lladdr = (struct target_sockaddr_ll *)addr;
1715         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1716         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1717     }
1718     unlock_user(target_saddr, target_addr, 0);
1719 
1720     return 0;
1721 }
1722 
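/*
 * Copy a host sockaddr back to guest memory, byte-swapping the family
 * field and the AF_NETLINK, AF_PACKET and AF_INET6 members whose
 * endianness differs between host and target.
 */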
1723 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1724                                                struct sockaddr *addr,
1725                                                socklen_t len)
1726 {
1727     struct target_sockaddr *target_saddr;
1728 
1729     if (len == 0) {
1730         return 0;
1731     }
1732     assert(addr);
1733 
1734     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1735     if (!target_saddr)
1736         return -TARGET_EFAULT;
1737     memcpy(target_saddr, addr, len);
1738     if (len >= offsetof(struct target_sockaddr, sa_family) +
1739         sizeof(target_saddr->sa_family)) {
1740         target_saddr->sa_family = tswap16(addr->sa_family);
1741     }
1742     if (addr->sa_family == AF_NETLINK &&
1743         len >= sizeof(struct target_sockaddr_nl)) {
1744         struct target_sockaddr_nl *target_nl =
1745                (struct target_sockaddr_nl *)target_saddr;
1746         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1747         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1748     } else if (addr->sa_family == AF_PACKET) {
1749         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1750         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1751         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1752     } else if (addr->sa_family == AF_INET6 &&
1753                len >= sizeof(struct target_sockaddr_in6)) {
1754         struct target_sockaddr_in6 *target_in6 =
1755                (struct target_sockaddr_in6 *)target_saddr;
1756         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1757     }
1758     unlock_user(target_saddr, target_addr, len);
1759 
1760     return 0;
1761 }
1762 
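/*
 * Convert ancillary data (control messages) from the guest msghdr into
 * the host msghdr.  SCM_RIGHTS file descriptors, SCM_CREDENTIALS and
 * SOL_ALG payloads are converted field by field; anything else is copied
 * verbatim with a LOG_UNIMP warning.
 */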
1763 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1764                                            struct target_msghdr *target_msgh)
1765 {
1766     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1767     abi_long msg_controllen;
1768     abi_ulong target_cmsg_addr;
1769     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1770     socklen_t space = 0;
1771 
1772     msg_controllen = tswapal(target_msgh->msg_controllen);
1773     if (msg_controllen < sizeof (struct target_cmsghdr))
1774         goto the_end;
1775     target_cmsg_addr = tswapal(target_msgh->msg_control);
1776     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1777     target_cmsg_start = target_cmsg;
1778     if (!target_cmsg)
1779         return -TARGET_EFAULT;
1780 
1781     while (cmsg && target_cmsg) {
1782         void *data = CMSG_DATA(cmsg);
1783         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1784 
1785         int len = tswapal(target_cmsg->cmsg_len)
1786             - sizeof(struct target_cmsghdr);
1787 
1788         space += CMSG_SPACE(len);
1789         if (space > msgh->msg_controllen) {
1790             space -= CMSG_SPACE(len);
1791             /* This is a QEMU bug, since we allocated the payload
1792              * area ourselves (unlike overflow in host-to-target
1793              * conversion, which is just the guest giving us a buffer
1794              * that's too small). It can't happen for the payload types
1795              * we currently support; if it becomes an issue in future
1796              * we would need to improve our allocation strategy to
1797              * something more intelligent than "twice the size of the
1798              * target buffer we're reading from".
1799              */
1800             qemu_log_mask(LOG_UNIMP,
1801                           ("Unsupported ancillary data %d/%d: "
1802                            "unhandled msg size\n"),
1803                           tswap32(target_cmsg->cmsg_level),
1804                           tswap32(target_cmsg->cmsg_type));
1805             break;
1806         }
1807 
1808         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1809             cmsg->cmsg_level = SOL_SOCKET;
1810         } else {
1811             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1812         }
1813         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1814         cmsg->cmsg_len = CMSG_LEN(len);
1815 
1816         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1817             int *fd = (int *)data;
1818             int *target_fd = (int *)target_data;
1819             int i, numfds = len / sizeof(int);
1820 
1821             for (i = 0; i < numfds; i++) {
1822                 __get_user(fd[i], target_fd + i);
1823             }
1824         } else if (cmsg->cmsg_level == SOL_SOCKET
1825                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1826             struct ucred *cred = (struct ucred *)data;
1827             struct target_ucred *target_cred =
1828                 (struct target_ucred *)target_data;
1829 
1830             __get_user(cred->pid, &target_cred->pid);
1831             __get_user(cred->uid, &target_cred->uid);
1832             __get_user(cred->gid, &target_cred->gid);
1833         } else if (cmsg->cmsg_level == SOL_ALG) {
1834             uint32_t *dst = (uint32_t *)data;
1835 
1836             memcpy(dst, target_data, len);
1837             /* fix endianness of the first 32-bit word */
1838             if (len >= sizeof(uint32_t)) {
1839                 *dst = tswap32(*dst);
1840             }
1841         } else {
1842             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1843                           cmsg->cmsg_level, cmsg->cmsg_type);
1844             memcpy(data, target_data, len);
1845         }
1846 
1847         cmsg = CMSG_NXTHDR(msgh, cmsg);
1848         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1849                                          target_cmsg_start);
1850     }
1851     unlock_user(target_cmsg, target_cmsg_addr, 0);
1852  the_end:
1853     msgh->msg_controllen = space;
1854     return 0;
1855 }
1856 
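/*
 * Convert ancillary data from the host msghdr back into the guest msghdr.
 * If the guest-supplied control buffer is too small, the data is
 * truncated and MSG_CTRUNC is set, mirroring the kernel's put_cmsg()
 * behaviour.
 */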
1857 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1858                                            struct msghdr *msgh)
1859 {
1860     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1861     abi_long msg_controllen;
1862     abi_ulong target_cmsg_addr;
1863     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1864     socklen_t space = 0;
1865 
1866     msg_controllen = tswapal(target_msgh->msg_controllen);
1867     if (msg_controllen < sizeof (struct target_cmsghdr))
1868         goto the_end;
1869     target_cmsg_addr = tswapal(target_msgh->msg_control);
1870     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1871     target_cmsg_start = target_cmsg;
1872     if (!target_cmsg)
1873         return -TARGET_EFAULT;
1874 
1875     while (cmsg && target_cmsg) {
1876         void *data = CMSG_DATA(cmsg);
1877         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1878 
1879         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1880         int tgt_len, tgt_space;
1881 
1882         /* We never copy a half-header but may copy half-data;
1883          * this is Linux's behaviour in put_cmsg(). Note that
1884          * truncation here is a guest problem (which we report
1885          * to the guest via the CTRUNC bit), unlike truncation
1886          * in target_to_host_cmsg, which is a QEMU bug.
1887          */
1888         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1889             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1890             break;
1891         }
1892 
1893         if (cmsg->cmsg_level == SOL_SOCKET) {
1894             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1895         } else {
1896             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1897         }
1898         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1899 
1900         /* Payload types which need a different size of payload on
1901          * the target must adjust tgt_len here.
1902          */
1903         tgt_len = len;
1904         switch (cmsg->cmsg_level) {
1905         case SOL_SOCKET:
1906             switch (cmsg->cmsg_type) {
1907             case SO_TIMESTAMP:
1908                 tgt_len = sizeof(struct target_timeval);
1909                 break;
1910             default:
1911                 break;
1912             }
1913             break;
1914         default:
1915             break;
1916         }
1917 
1918         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1919             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1920             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1921         }
1922 
1923         /* We must now copy-and-convert len bytes of payload
1924          * into tgt_len bytes of destination space. Bear in mind
1925          * that in both source and destination we may be dealing
1926          * with a truncated value!
1927          */
1928         switch (cmsg->cmsg_level) {
1929         case SOL_SOCKET:
1930             switch (cmsg->cmsg_type) {
1931             case SCM_RIGHTS:
1932             {
1933                 int *fd = (int *)data;
1934                 int *target_fd = (int *)target_data;
1935                 int i, numfds = tgt_len / sizeof(int);
1936 
1937                 for (i = 0; i < numfds; i++) {
1938                     __put_user(fd[i], target_fd + i);
1939                 }
1940                 break;
1941             }
1942             case SO_TIMESTAMP:
1943             {
1944                 struct timeval *tv = (struct timeval *)data;
1945                 struct target_timeval *target_tv =
1946                     (struct target_timeval *)target_data;
1947 
1948                 if (len != sizeof(struct timeval) ||
1949                     tgt_len != sizeof(struct target_timeval)) {
1950                     goto unimplemented;
1951                 }
1952 
1953                 /* copy struct timeval to target */
1954                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1955                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1956                 break;
1957             }
1958             case SCM_CREDENTIALS:
1959             {
1960                 struct ucred *cred = (struct ucred *)data;
1961                 struct target_ucred *target_cred =
1962                     (struct target_ucred *)target_data;
1963 
1964                 __put_user(cred->pid, &target_cred->pid);
1965                 __put_user(cred->uid, &target_cred->uid);
1966                 __put_user(cred->gid, &target_cred->gid);
1967                 break;
1968             }
1969             default:
1970                 goto unimplemented;
1971             }
1972             break;
1973 
1974         case SOL_IP:
1975             switch (cmsg->cmsg_type) {
1976             case IP_TTL:
1977             {
1978                 uint32_t *v = (uint32_t *)data;
1979                 uint32_t *t_int = (uint32_t *)target_data;
1980 
1981                 if (len != sizeof(uint32_t) ||
1982                     tgt_len != sizeof(uint32_t)) {
1983                     goto unimplemented;
1984                 }
1985                 __put_user(*v, t_int);
1986                 break;
1987             }
1988             case IP_RECVERR:
1989             {
1990                 struct errhdr_t {
1991                    struct sock_extended_err ee;
1992                    struct sockaddr_in offender;
1993                 };
1994                 struct errhdr_t *errh = (struct errhdr_t *)data;
1995                 struct errhdr_t *target_errh =
1996                     (struct errhdr_t *)target_data;
1997 
1998                 if (len != sizeof(struct errhdr_t) ||
1999                     tgt_len != sizeof(struct errhdr_t)) {
2000                     goto unimplemented;
2001                 }
2002                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2003                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2004                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2005                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2006                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2007                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2008                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2009                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2010                     (void *) &errh->offender, sizeof(errh->offender));
2011                 break;
2012             }
2013             default:
2014                 goto unimplemented;
2015             }
2016             break;
2017 
2018         case SOL_IPV6:
2019             switch (cmsg->cmsg_type) {
2020             case IPV6_HOPLIMIT:
2021             {
2022                 uint32_t *v = (uint32_t *)data;
2023                 uint32_t *t_int = (uint32_t *)target_data;
2024 
2025                 if (len != sizeof(uint32_t) ||
2026                     tgt_len != sizeof(uint32_t)) {
2027                     goto unimplemented;
2028                 }
2029                 __put_user(*v, t_int);
2030                 break;
2031             }
2032             case IPV6_RECVERR:
2033             {
2034                 struct errhdr6_t {
2035                    struct sock_extended_err ee;
2036                    struct sockaddr_in6 offender;
2037                 };
2038                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2039                 struct errhdr6_t *target_errh =
2040                     (struct errhdr6_t *)target_data;
2041 
2042                 if (len != sizeof(struct errhdr6_t) ||
2043                     tgt_len != sizeof(struct errhdr6_t)) {
2044                     goto unimplemented;
2045                 }
2046                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2047                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2048                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2049                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2050                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2051                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2052                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2053                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2054                     (void *) &errh->offender, sizeof(errh->offender));
2055                 break;
2056             }
2057             default:
2058                 goto unimplemented;
2059             }
2060             break;
2061 
2062         default:
2063         unimplemented:
2064             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2065                           cmsg->cmsg_level, cmsg->cmsg_type);
2066             memcpy(target_data, data, MIN(len, tgt_len));
2067             if (tgt_len > len) {
2068                 memset(target_data + len, 0, tgt_len - len);
2069             }
2070         }
2071 
2072         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2073         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2074         if (msg_controllen < tgt_space) {
2075             tgt_space = msg_controllen;
2076         }
2077         msg_controllen -= tgt_space;
2078         space += tgt_space;
2079         cmsg = CMSG_NXTHDR(msgh, cmsg);
2080         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2081                                          target_cmsg_start);
2082     }
2083     unlock_user(target_cmsg, target_cmsg_addr, space);
2084  the_end:
2085     target_msgh->msg_controllen = tswapal(space);
2086     return 0;
2087 }
2088 
2089 /* do_setsockopt() must return target values and target errnos. */
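/*
 * Option values are converted from the guest representation as needed:
 * most options take a plain int, while e.g. IP_ADD_MEMBERSHIP,
 * SO_ATTACH_FILTER, SO_LINGER and the timeout options have their
 * structures translated element by element.
 */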
2090 static abi_long do_setsockopt(int sockfd, int level, int optname,
2091                               abi_ulong optval_addr, socklen_t optlen)
2092 {
2093     abi_long ret;
2094     int val;
2095     struct ip_mreqn *ip_mreq;
2096     struct ip_mreq_source *ip_mreq_source;
2097 
2098     switch(level) {
2099     case SOL_TCP:
2100     case SOL_UDP:
2101         /* TCP and UDP options all take an 'int' value.  */
2102         if (optlen < sizeof(uint32_t))
2103             return -TARGET_EINVAL;
2104 
2105         if (get_user_u32(val, optval_addr))
2106             return -TARGET_EFAULT;
2107         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2108         break;
2109     case SOL_IP:
2110         switch(optname) {
2111         case IP_TOS:
2112         case IP_TTL:
2113         case IP_HDRINCL:
2114         case IP_ROUTER_ALERT:
2115         case IP_RECVOPTS:
2116         case IP_RETOPTS:
2117         case IP_PKTINFO:
2118         case IP_MTU_DISCOVER:
2119         case IP_RECVERR:
2120         case IP_RECVTTL:
2121         case IP_RECVTOS:
2122 #ifdef IP_FREEBIND
2123         case IP_FREEBIND:
2124 #endif
2125         case IP_MULTICAST_TTL:
2126         case IP_MULTICAST_LOOP:
2127             val = 0;
2128             if (optlen >= sizeof(uint32_t)) {
2129                 if (get_user_u32(val, optval_addr))
2130                     return -TARGET_EFAULT;
2131             } else if (optlen >= 1) {
2132                 if (get_user_u8(val, optval_addr))
2133                     return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2136             break;
2137         case IP_ADD_MEMBERSHIP:
2138         case IP_DROP_MEMBERSHIP:
2139             if (optlen < sizeof (struct target_ip_mreq) ||
2140                 optlen > sizeof (struct target_ip_mreqn))
2141                 return -TARGET_EINVAL;
2142 
2143             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2144             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2145             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2146             break;
2147 
2148         case IP_BLOCK_SOURCE:
2149         case IP_UNBLOCK_SOURCE:
2150         case IP_ADD_SOURCE_MEMBERSHIP:
2151         case IP_DROP_SOURCE_MEMBERSHIP:
2152             if (optlen != sizeof (struct target_ip_mreq_source))
2153                 return -TARGET_EINVAL;
2154 
2155             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2156             if (!ip_mreq_source) {
2157                 return -TARGET_EFAULT;
2158             }
2159             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2160             unlock_user(ip_mreq_source, optval_addr, 0);
2161             break;
2162 
2163         default:
2164             goto unimplemented;
2165         }
2166         break;
2167     case SOL_IPV6:
2168         switch (optname) {
2169         case IPV6_MTU_DISCOVER:
2170         case IPV6_MTU:
2171         case IPV6_V6ONLY:
2172         case IPV6_RECVPKTINFO:
2173         case IPV6_UNICAST_HOPS:
2174         case IPV6_MULTICAST_HOPS:
2175         case IPV6_MULTICAST_LOOP:
2176         case IPV6_RECVERR:
2177         case IPV6_RECVHOPLIMIT:
2178         case IPV6_2292HOPLIMIT:
2179         case IPV6_CHECKSUM:
2180         case IPV6_ADDRFORM:
2181         case IPV6_2292PKTINFO:
2182         case IPV6_RECVTCLASS:
2183         case IPV6_RECVRTHDR:
2184         case IPV6_2292RTHDR:
2185         case IPV6_RECVHOPOPTS:
2186         case IPV6_2292HOPOPTS:
2187         case IPV6_RECVDSTOPTS:
2188         case IPV6_2292DSTOPTS:
2189         case IPV6_TCLASS:
2190         case IPV6_ADDR_PREFERENCES:
2191 #ifdef IPV6_RECVPATHMTU
2192         case IPV6_RECVPATHMTU:
2193 #endif
2194 #ifdef IPV6_TRANSPARENT
2195         case IPV6_TRANSPARENT:
2196 #endif
2197 #ifdef IPV6_FREEBIND
2198         case IPV6_FREEBIND:
2199 #endif
2200 #ifdef IPV6_RECVORIGDSTADDR
2201         case IPV6_RECVORIGDSTADDR:
2202 #endif
2203             val = 0;
2204             if (optlen < sizeof(uint32_t)) {
2205                 return -TARGET_EINVAL;
2206             }
2207             if (get_user_u32(val, optval_addr)) {
2208                 return -TARGET_EFAULT;
2209             }
2210             ret = get_errno(setsockopt(sockfd, level, optname,
2211                                        &val, sizeof(val)));
2212             break;
2213         case IPV6_PKTINFO:
2214         {
2215             struct in6_pktinfo pki;
2216 
2217             if (optlen < sizeof(pki)) {
2218                 return -TARGET_EINVAL;
2219             }
2220 
2221             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2222                 return -TARGET_EFAULT;
2223             }
2224 
2225             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2226 
2227             ret = get_errno(setsockopt(sockfd, level, optname,
2228                                        &pki, sizeof(pki)));
2229             break;
2230         }
2231         case IPV6_ADD_MEMBERSHIP:
2232         case IPV6_DROP_MEMBERSHIP:
2233         {
2234             struct ipv6_mreq ipv6mreq;
2235 
2236             if (optlen < sizeof(ipv6mreq)) {
2237                 return -TARGET_EINVAL;
2238             }
2239 
2240             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2245 
2246             ret = get_errno(setsockopt(sockfd, level, optname,
2247                                        &ipv6mreq, sizeof(ipv6mreq)));
2248             break;
2249         }
2250         default:
2251             goto unimplemented;
2252         }
2253         break;
2254     case SOL_ICMPV6:
2255         switch (optname) {
2256         case ICMPV6_FILTER:
2257         {
2258             struct icmp6_filter icmp6f;
2259 
2260             if (optlen > sizeof(icmp6f)) {
2261                 optlen = sizeof(icmp6f);
2262             }
2263 
2264             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2265                 return -TARGET_EFAULT;
2266             }
2267 
2268             for (val = 0; val < 8; val++) {
2269                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2270             }
2271 
2272             ret = get_errno(setsockopt(sockfd, level, optname,
2273                                        &icmp6f, optlen));
2274             break;
2275         }
2276         default:
2277             goto unimplemented;
2278         }
2279         break;
2280     case SOL_RAW:
2281         switch (optname) {
2282         case ICMP_FILTER:
2283         case IPV6_CHECKSUM:
2284             /* these take a u32 value */
2285             if (optlen < sizeof(uint32_t)) {
2286                 return -TARGET_EINVAL;
2287             }
2288 
2289             if (get_user_u32(val, optval_addr)) {
2290                 return -TARGET_EFAULT;
2291             }
2292             ret = get_errno(setsockopt(sockfd, level, optname,
2293                                        &val, sizeof(val)));
2294             break;
2295 
2296         default:
2297             goto unimplemented;
2298         }
2299         break;
2300 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2301     case SOL_ALG:
2302         switch (optname) {
2303         case ALG_SET_KEY:
2304         {
2305             char *alg_key = g_malloc(optlen);
2306 
2307             if (!alg_key) {
2308                 return -TARGET_ENOMEM;
2309             }
2310             if (copy_from_user(alg_key, optval_addr, optlen)) {
2311                 g_free(alg_key);
2312                 return -TARGET_EFAULT;
2313             }
2314             ret = get_errno(setsockopt(sockfd, level, optname,
2315                                        alg_key, optlen));
2316             g_free(alg_key);
2317             break;
2318         }
2319         case ALG_SET_AEAD_AUTHSIZE:
2320         {
2321             ret = get_errno(setsockopt(sockfd, level, optname,
2322                                        NULL, optlen));
2323             break;
2324         }
2325         default:
2326             goto unimplemented;
2327         }
2328         break;
2329 #endif
2330     case TARGET_SOL_SOCKET:
2331         switch (optname) {
2332         case TARGET_SO_RCVTIMEO:
2333         {
2334                 struct timeval tv;
2335 
2336                 optname = SO_RCVTIMEO;
2337 
2338 set_timeout:
2339                 if (optlen != sizeof(struct target_timeval)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342 
2343                 if (copy_from_user_timeval(&tv, optval_addr)) {
2344                     return -TARGET_EFAULT;
2345                 }
2346 
2347                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2348                                 &tv, sizeof(tv)));
2349                 return ret;
2350         }
2351         case TARGET_SO_SNDTIMEO:
2352                 optname = SO_SNDTIMEO;
2353                 goto set_timeout;
2354         case TARGET_SO_ATTACH_FILTER:
2355         {
2356                 struct target_sock_fprog *tfprog;
2357                 struct target_sock_filter *tfilter;
2358                 struct sock_fprog fprog;
2359                 struct sock_filter *filter;
2360                 int i;
2361 
2362                 if (optlen != sizeof(*tfprog)) {
2363                     return -TARGET_EINVAL;
2364                 }
2365                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2366                     return -TARGET_EFAULT;
2367                 }
2368                 if (!lock_user_struct(VERIFY_READ, tfilter,
2369                                       tswapal(tfprog->filter), 0)) {
2370                     unlock_user_struct(tfprog, optval_addr, 1);
2371                     return -TARGET_EFAULT;
2372                 }
2373 
2374                 fprog.len = tswap16(tfprog->len);
2375                 filter = g_try_new(struct sock_filter, fprog.len);
2376                 if (filter == NULL) {
2377                     unlock_user_struct(tfilter, tfprog->filter, 1);
2378                     unlock_user_struct(tfprog, optval_addr, 1);
2379                     return -TARGET_ENOMEM;
2380                 }
2381                 for (i = 0; i < fprog.len; i++) {
2382                     filter[i].code = tswap16(tfilter[i].code);
2383                     filter[i].jt = tfilter[i].jt;
2384                     filter[i].jf = tfilter[i].jf;
2385                     filter[i].k = tswap32(tfilter[i].k);
2386                 }
2387                 fprog.filter = filter;
2388 
2389                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2390                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2391                 g_free(filter);
2392 
2393                 unlock_user_struct(tfilter, tfprog->filter, 1);
2394                 unlock_user_struct(tfprog, optval_addr, 1);
2395                 return ret;
2396         }
2397         case TARGET_SO_BINDTODEVICE:
2398         {
2399                 char *dev_ifname, *addr_ifname;
2400 
2401                 if (optlen > IFNAMSIZ - 1) {
2402                     optlen = IFNAMSIZ - 1;
2403                 }
2404                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2405                 if (!dev_ifname) {
2406                     return -TARGET_EFAULT;
2407                 }
2408                 optname = SO_BINDTODEVICE;
2409                 addr_ifname = alloca(IFNAMSIZ);
2410                 memcpy(addr_ifname, dev_ifname, optlen);
2411                 addr_ifname[optlen] = 0;
2412                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2413                                            addr_ifname, optlen));
2414                 unlock_user(dev_ifname, optval_addr, 0);
2415                 return ret;
2416         }
2417         case TARGET_SO_LINGER:
2418         {
2419                 struct linger lg;
2420                 struct target_linger *tlg;
2421 
2422                 if (optlen != sizeof(struct target_linger)) {
2423                     return -TARGET_EINVAL;
2424                 }
2425                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2426                     return -TARGET_EFAULT;
2427                 }
2428                 __get_user(lg.l_onoff, &tlg->l_onoff);
2429                 __get_user(lg.l_linger, &tlg->l_linger);
2430                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2431                                 &lg, sizeof(lg)));
2432                 unlock_user_struct(tlg, optval_addr, 0);
2433                 return ret;
2434         }
2435         /* Options with 'int' argument.  */
2436         case TARGET_SO_DEBUG:
2437                 optname = SO_DEBUG;
2438                 break;
2439         case TARGET_SO_REUSEADDR:
2440                 optname = SO_REUSEADDR;
2441                 break;
2442 #ifdef SO_REUSEPORT
2443         case TARGET_SO_REUSEPORT:
2444                 optname = SO_REUSEPORT;
2445                 break;
2446 #endif
2447         case TARGET_SO_TYPE:
2448                 optname = SO_TYPE;
2449                 break;
2450         case TARGET_SO_ERROR:
2451                 optname = SO_ERROR;
2452                 break;
2453         case TARGET_SO_DONTROUTE:
2454                 optname = SO_DONTROUTE;
2455                 break;
2456         case TARGET_SO_BROADCAST:
2457                 optname = SO_BROADCAST;
2458                 break;
2459         case TARGET_SO_SNDBUF:
2460                 optname = SO_SNDBUF;
2461                 break;
2462         case TARGET_SO_SNDBUFFORCE:
2463                 optname = SO_SNDBUFFORCE;
2464                 break;
2465         case TARGET_SO_RCVBUF:
2466                 optname = SO_RCVBUF;
2467                 break;
2468         case TARGET_SO_RCVBUFFORCE:
2469                 optname = SO_RCVBUFFORCE;
2470                 break;
2471         case TARGET_SO_KEEPALIVE:
2472                 optname = SO_KEEPALIVE;
2473                 break;
2474         case TARGET_SO_OOBINLINE:
2475                 optname = SO_OOBINLINE;
2476                 break;
2477         case TARGET_SO_NO_CHECK:
2478                 optname = SO_NO_CHECK;
2479                 break;
2480         case TARGET_SO_PRIORITY:
2481                 optname = SO_PRIORITY;
2482                 break;
2483 #ifdef SO_BSDCOMPAT
2484         case TARGET_SO_BSDCOMPAT:
2485                 optname = SO_BSDCOMPAT;
2486                 break;
2487 #endif
2488         case TARGET_SO_PASSCRED:
2489                 optname = SO_PASSCRED;
2490                 break;
2491         case TARGET_SO_PASSSEC:
2492                 optname = SO_PASSSEC;
2493                 break;
2494         case TARGET_SO_TIMESTAMP:
2495                 optname = SO_TIMESTAMP;
2496                 break;
2497         case TARGET_SO_RCVLOWAT:
2498                 optname = SO_RCVLOWAT;
2499                 break;
2500         default:
2501             goto unimplemented;
2502         }
2503         if (optlen < sizeof(uint32_t))
2504             return -TARGET_EINVAL;
2505 
2506         if (get_user_u32(val, optval_addr))
2507             return -TARGET_EFAULT;
2508         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2509         break;
2510 #ifdef SOL_NETLINK
2511     case SOL_NETLINK:
2512         switch (optname) {
2513         case NETLINK_PKTINFO:
2514         case NETLINK_ADD_MEMBERSHIP:
2515         case NETLINK_DROP_MEMBERSHIP:
2516         case NETLINK_BROADCAST_ERROR:
2517         case NETLINK_NO_ENOBUFS:
2518 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2519         case NETLINK_LISTEN_ALL_NSID:
2520         case NETLINK_CAP_ACK:
2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2523         case NETLINK_EXT_ACK:
2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2525 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2526         case NETLINK_GET_STRICT_CHK:
2527 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2528             break;
2529         default:
2530             goto unimplemented;
2531         }
2532         val = 0;
2533         if (optlen < sizeof(uint32_t)) {
2534             return -TARGET_EINVAL;
2535         }
2536         if (get_user_u32(val, optval_addr)) {
2537             return -TARGET_EFAULT;
2538         }
2539         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2540                                    sizeof(val)));
2541         break;
2542 #endif /* SOL_NETLINK */
2543     default:
2544     unimplemented:
2545         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2546                       level, optname);
2547         ret = -TARGET_ENOPROTOOPT;
2548     }
2549     return ret;
2550 }
2551 
2552 /* do_getsockopt() must return target values and target errnos. */
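/*
 * As with do_setsockopt(), integer options are handled generically via
 * the int_case label, while structured options (timeouts, SO_PEERCRED,
 * SO_LINGER, netlink membership lists) are converted individually.
 */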
2553 static abi_long do_getsockopt(int sockfd, int level, int optname,
2554                               abi_ulong optval_addr, abi_ulong optlen)
2555 {
2556     abi_long ret;
2557     int len, val;
2558     socklen_t lv;
2559 
2560     switch(level) {
2561     case TARGET_SOL_SOCKET:
2562         level = SOL_SOCKET;
2563         switch (optname) {
2564         /* These don't just return a single integer */
2565         case TARGET_SO_PEERNAME:
2566             goto unimplemented;
2567         case TARGET_SO_RCVTIMEO: {
2568             struct timeval tv;
2569             socklen_t tvlen;
2570 
2571             optname = SO_RCVTIMEO;
2572 
2573 get_timeout:
2574             if (get_user_u32(len, optlen)) {
2575                 return -TARGET_EFAULT;
2576             }
2577             if (len < 0) {
2578                 return -TARGET_EINVAL;
2579             }
2580 
2581             tvlen = sizeof(tv);
2582             ret = get_errno(getsockopt(sockfd, level, optname,
2583                                        &tv, &tvlen));
2584             if (ret < 0) {
2585                 return ret;
2586             }
2587             if (len > sizeof(struct target_timeval)) {
2588                 len = sizeof(struct target_timeval);
2589             }
2590             if (copy_to_user_timeval(optval_addr, &tv)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             if (put_user_u32(len, optlen)) {
2594                 return -TARGET_EFAULT;
2595             }
2596             break;
2597         }
2598         case TARGET_SO_SNDTIMEO:
2599             optname = SO_SNDTIMEO;
2600             goto get_timeout;
2601         case TARGET_SO_PEERCRED: {
2602             struct ucred cr;
2603             socklen_t crlen;
2604             struct target_ucred *tcr;
2605 
2606             if (get_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             if (len < 0) {
2610                 return -TARGET_EINVAL;
2611             }
2612 
2613             crlen = sizeof(cr);
2614             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2615                                        &cr, &crlen));
2616             if (ret < 0) {
2617                 return ret;
2618             }
2619             if (len > crlen) {
2620                 len = crlen;
2621             }
2622             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2623                 return -TARGET_EFAULT;
2624             }
2625             __put_user(cr.pid, &tcr->pid);
2626             __put_user(cr.uid, &tcr->uid);
2627             __put_user(cr.gid, &tcr->gid);
2628             unlock_user_struct(tcr, optval_addr, 1);
2629             if (put_user_u32(len, optlen)) {
2630                 return -TARGET_EFAULT;
2631             }
2632             break;
2633         }
2634         case TARGET_SO_PEERSEC: {
2635             char *name;
2636 
2637             if (get_user_u32(len, optlen)) {
2638                 return -TARGET_EFAULT;
2639             }
2640             if (len < 0) {
2641                 return -TARGET_EINVAL;
2642             }
2643             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2644             if (!name) {
2645                 return -TARGET_EFAULT;
2646             }
2647             lv = len;
2648             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2649                                        name, &lv));
2650             if (put_user_u32(lv, optlen)) {
2651                 ret = -TARGET_EFAULT;
2652             }
2653             unlock_user(name, optval_addr, lv);
2654             break;
2655         }
2656         case TARGET_SO_LINGER:
2657         {
2658             struct linger lg;
2659             socklen_t lglen;
2660             struct target_linger *tlg;
2661 
2662             if (get_user_u32(len, optlen)) {
2663                 return -TARGET_EFAULT;
2664             }
2665             if (len < 0) {
2666                 return -TARGET_EINVAL;
2667             }
2668 
2669             lglen = sizeof(lg);
2670             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2671                                        &lg, &lglen));
2672             if (ret < 0) {
2673                 return ret;
2674             }
2675             if (len > lglen) {
2676                 len = lglen;
2677             }
2678             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2679                 return -TARGET_EFAULT;
2680             }
2681             __put_user(lg.l_onoff, &tlg->l_onoff);
2682             __put_user(lg.l_linger, &tlg->l_linger);
2683             unlock_user_struct(tlg, optval_addr, 1);
2684             if (put_user_u32(len, optlen)) {
2685                 return -TARGET_EFAULT;
2686             }
2687             break;
2688         }
2689         /* Options with 'int' argument.  */
2690         case TARGET_SO_DEBUG:
2691             optname = SO_DEBUG;
2692             goto int_case;
2693         case TARGET_SO_REUSEADDR:
2694             optname = SO_REUSEADDR;
2695             goto int_case;
2696 #ifdef SO_REUSEPORT
2697         case TARGET_SO_REUSEPORT:
2698             optname = SO_REUSEPORT;
2699             goto int_case;
2700 #endif
2701         case TARGET_SO_TYPE:
2702             optname = SO_TYPE;
2703             goto int_case;
2704         case TARGET_SO_ERROR:
2705             optname = SO_ERROR;
2706             goto int_case;
2707         case TARGET_SO_DONTROUTE:
2708             optname = SO_DONTROUTE;
2709             goto int_case;
2710         case TARGET_SO_BROADCAST:
2711             optname = SO_BROADCAST;
2712             goto int_case;
2713         case TARGET_SO_SNDBUF:
2714             optname = SO_SNDBUF;
2715             goto int_case;
2716         case TARGET_SO_RCVBUF:
2717             optname = SO_RCVBUF;
2718             goto int_case;
2719         case TARGET_SO_KEEPALIVE:
2720             optname = SO_KEEPALIVE;
2721             goto int_case;
2722         case TARGET_SO_OOBINLINE:
2723             optname = SO_OOBINLINE;
2724             goto int_case;
2725         case TARGET_SO_NO_CHECK:
2726             optname = SO_NO_CHECK;
2727             goto int_case;
2728         case TARGET_SO_PRIORITY:
2729             optname = SO_PRIORITY;
2730             goto int_case;
2731 #ifdef SO_BSDCOMPAT
2732         case TARGET_SO_BSDCOMPAT:
2733             optname = SO_BSDCOMPAT;
2734             goto int_case;
2735 #endif
2736         case TARGET_SO_PASSCRED:
2737             optname = SO_PASSCRED;
2738             goto int_case;
2739         case TARGET_SO_TIMESTAMP:
2740             optname = SO_TIMESTAMP;
2741             goto int_case;
2742         case TARGET_SO_RCVLOWAT:
2743             optname = SO_RCVLOWAT;
2744             goto int_case;
2745         case TARGET_SO_ACCEPTCONN:
2746             optname = SO_ACCEPTCONN;
2747             goto int_case;
2748         case TARGET_SO_PROTOCOL:
2749             optname = SO_PROTOCOL;
2750             goto int_case;
2751         case TARGET_SO_DOMAIN:
2752             optname = SO_DOMAIN;
2753             goto int_case;
2754         default:
2755             goto int_case;
2756         }
2757         break;
2758     case SOL_TCP:
2759     case SOL_UDP:
2760         /* TCP and UDP options all take an 'int' value.  */
2761     int_case:
2762         if (get_user_u32(len, optlen))
2763             return -TARGET_EFAULT;
2764         if (len < 0)
2765             return -TARGET_EINVAL;
2766         lv = sizeof(lv);
2767         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2768         if (ret < 0)
2769             return ret;
2770         switch (optname) {
2771         case SO_TYPE:
2772             val = host_to_target_sock_type(val);
2773             break;
2774         case SO_ERROR:
2775             val = host_to_target_errno(val);
2776             break;
2777         }
2778         if (len > lv)
2779             len = lv;
2780         if (len == 4) {
2781             if (put_user_u32(val, optval_addr))
2782                 return -TARGET_EFAULT;
2783         } else {
2784             if (put_user_u8(val, optval_addr))
2785                 return -TARGET_EFAULT;
2786         }
2787         if (put_user_u32(len, optlen))
2788             return -TARGET_EFAULT;
2789         break;
2790     case SOL_IP:
2791         switch(optname) {
2792         case IP_TOS:
2793         case IP_TTL:
2794         case IP_HDRINCL:
2795         case IP_ROUTER_ALERT:
2796         case IP_RECVOPTS:
2797         case IP_RETOPTS:
2798         case IP_PKTINFO:
2799         case IP_MTU_DISCOVER:
2800         case IP_RECVERR:
2801         case IP_RECVTOS:
2802 #ifdef IP_FREEBIND
2803         case IP_FREEBIND:
2804 #endif
2805         case IP_MULTICAST_TTL:
2806         case IP_MULTICAST_LOOP:
2807             if (get_user_u32(len, optlen))
2808                 return -TARGET_EFAULT;
2809             if (len < 0)
2810                 return -TARGET_EINVAL;
2811             lv = sizeof(lv);
2812             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2813             if (ret < 0)
2814                 return ret;
2815             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2816                 len = 1;
2817                 if (put_user_u32(len, optlen)
2818                     || put_user_u8(val, optval_addr))
2819                     return -TARGET_EFAULT;
2820             } else {
2821                 if (len > sizeof(int))
2822                     len = sizeof(int);
2823                 if (put_user_u32(len, optlen)
2824                     || put_user_u32(val, optval_addr))
2825                     return -TARGET_EFAULT;
2826             }
2827             break;
2828         default:
2829             ret = -TARGET_ENOPROTOOPT;
2830             break;
2831         }
2832         break;
2833     case SOL_IPV6:
2834         switch (optname) {
2835         case IPV6_MTU_DISCOVER:
2836         case IPV6_MTU:
2837         case IPV6_V6ONLY:
2838         case IPV6_RECVPKTINFO:
2839         case IPV6_UNICAST_HOPS:
2840         case IPV6_MULTICAST_HOPS:
2841         case IPV6_MULTICAST_LOOP:
2842         case IPV6_RECVERR:
2843         case IPV6_RECVHOPLIMIT:
2844         case IPV6_2292HOPLIMIT:
2845         case IPV6_CHECKSUM:
2846         case IPV6_ADDRFORM:
2847         case IPV6_2292PKTINFO:
2848         case IPV6_RECVTCLASS:
2849         case IPV6_RECVRTHDR:
2850         case IPV6_2292RTHDR:
2851         case IPV6_RECVHOPOPTS:
2852         case IPV6_2292HOPOPTS:
2853         case IPV6_RECVDSTOPTS:
2854         case IPV6_2292DSTOPTS:
2855         case IPV6_TCLASS:
2856         case IPV6_ADDR_PREFERENCES:
2857 #ifdef IPV6_RECVPATHMTU
2858         case IPV6_RECVPATHMTU:
2859 #endif
2860 #ifdef IPV6_TRANSPARENT
2861         case IPV6_TRANSPARENT:
2862 #endif
2863 #ifdef IPV6_FREEBIND
2864         case IPV6_FREEBIND:
2865 #endif
2866 #ifdef IPV6_RECVORIGDSTADDR
2867         case IPV6_RECVORIGDSTADDR:
2868 #endif
2869             if (get_user_u32(len, optlen))
2870                 return -TARGET_EFAULT;
2871             if (len < 0)
2872                 return -TARGET_EINVAL;
2873             lv = sizeof(lv);
2874             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2875             if (ret < 0)
2876                 return ret;
2877             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2878                 len = 1;
2879                 if (put_user_u32(len, optlen)
2880                     || put_user_u8(val, optval_addr))
2881                     return -TARGET_EFAULT;
2882             } else {
2883                 if (len > sizeof(int))
2884                     len = sizeof(int);
2885                 if (put_user_u32(len, optlen)
2886                     || put_user_u32(val, optval_addr))
2887                     return -TARGET_EFAULT;
2888             }
2889             break;
2890         default:
2891             ret = -TARGET_ENOPROTOOPT;
2892             break;
2893         }
2894         break;
2895 #ifdef SOL_NETLINK
2896     case SOL_NETLINK:
2897         switch (optname) {
2898         case NETLINK_PKTINFO:
2899         case NETLINK_BROADCAST_ERROR:
2900         case NETLINK_NO_ENOBUFS:
2901 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2902         case NETLINK_LISTEN_ALL_NSID:
2903         case NETLINK_CAP_ACK:
2904 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2905 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2906         case NETLINK_EXT_ACK:
2907 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2908 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2909         case NETLINK_GET_STRICT_CHK:
2910 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2911             if (get_user_u32(len, optlen)) {
2912                 return -TARGET_EFAULT;
2913             }
2914             if (len != sizeof(val)) {
2915                 return -TARGET_EINVAL;
2916             }
2917             lv = len;
2918             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2919             if (ret < 0) {
2920                 return ret;
2921             }
2922             if (put_user_u32(lv, optlen)
2923                 || put_user_u32(val, optval_addr)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             break;
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2928         case NETLINK_LIST_MEMBERSHIPS:
2929         {
2930             uint32_t *results;
2931             int i;
2932             if (get_user_u32(len, optlen)) {
2933                 return -TARGET_EFAULT;
2934             }
2935             if (len < 0) {
2936                 return -TARGET_EINVAL;
2937             }
2938             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2939             if (!results && len > 0) {
2940                 return -TARGET_EFAULT;
2941             }
2942             lv = len;
2943             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2944             if (ret < 0) {
2945                 unlock_user(results, optval_addr, 0);
2946                 return ret;
2947             }
2948             /* Swap host endianness to target endianness. */
2949             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2950                 results[i] = tswap32(results[i]);
2951             }
2952             if (put_user_u32(lv, optlen)) {
2953                 return -TARGET_EFAULT;
2954             }
2955             unlock_user(results, optval_addr, 0);
2956             break;
2957         }
2958 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2959         default:
2960             goto unimplemented;
2961         }
2962         break;
2963 #endif /* SOL_NETLINK */
2964     default:
2965     unimplemented:
2966         qemu_log_mask(LOG_UNIMP,
2967                       "getsockopt level=%d optname=%d not yet supported\n",
2968                       level, optname);
2969         ret = -TARGET_EOPNOTSUPP;
2970         break;
2971     }
2972     return ret;
2973 }
2974 
2975 /* Convert a target low/high pair representing a file offset into the host
2976  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2977  * as the kernel doesn't handle them either.
2978  */
2979 static void target_to_host_low_high(abi_ulong tlow,
2980                                     abi_ulong thigh,
2981                                     unsigned long *hlow,
2982                                     unsigned long *hhigh)
2983 {
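    /*
     * The shifts below are split into two half-width steps so that the
     * shift count never reaches the width of the type; a full-width
     * shift would be undefined behaviour in C (e.g. when
     * TARGET_LONG_BITS or HOST_LONG_BITS is 64).
     */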
2984     uint64_t off = tlow |
2985         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2986         TARGET_LONG_BITS / 2;
2987 
2988     *hlow = off;
2989     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2990 }
2991 
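/*
 * Lock a guest iovec array and the buffers it points to, returning a
 * freshly allocated host iovec array.  On failure NULL is returned and
 * errno is set (errno is 0 for an empty vector).  After the first entry,
 * a bad buffer address produces a zero-length entry rather than a fault,
 * so the guest sees a partial transfer.
 */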
2992 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2993                                 abi_ulong count, int copy)
2994 {
2995     struct target_iovec *target_vec;
2996     struct iovec *vec;
2997     abi_ulong total_len, max_len;
2998     int i;
2999     int err = 0;
3000     bool bad_address = false;
3001 
3002     if (count == 0) {
3003         errno = 0;
3004         return NULL;
3005     }
3006     if (count > IOV_MAX) {
3007         errno = EINVAL;
3008         return NULL;
3009     }
3010 
3011     vec = g_try_new0(struct iovec, count);
3012     if (vec == NULL) {
3013         errno = ENOMEM;
3014         return NULL;
3015     }
3016 
3017     target_vec = lock_user(VERIFY_READ, target_addr,
3018                            count * sizeof(struct target_iovec), 1);
3019     if (target_vec == NULL) {
3020         err = EFAULT;
3021         goto fail2;
3022     }
3023 
3024     /* ??? If host page size > target page size, this will result in a
3025        value larger than what we can actually support.  */
3026     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3027     total_len = 0;
3028 
3029     for (i = 0; i < count; i++) {
3030         abi_ulong base = tswapal(target_vec[i].iov_base);
3031         abi_long len = tswapal(target_vec[i].iov_len);
3032 
3033         if (len < 0) {
3034             err = EINVAL;
3035             goto fail;
3036         } else if (len == 0) {
3037             /* Zero length pointer is ignored.  */
3038             vec[i].iov_base = 0;
3039         } else {
3040             vec[i].iov_base = lock_user(type, base, len, copy);
3041             /* If the first buffer pointer is bad, this is a fault.  But
3042              * subsequent bad buffers will result in a partial write; this
3043              * is realized by filling the vector with null pointers and
3044              * zero lengths. */
3045             if (!vec[i].iov_base) {
3046                 if (i == 0) {
3047                     err = EFAULT;
3048                     goto fail;
3049                 } else {
3050                     bad_address = true;
3051                 }
3052             }
3053             if (bad_address) {
3054                 len = 0;
3055             }
3056             if (len > max_len - total_len) {
3057                 len = max_len - total_len;
3058             }
3059         }
3060         vec[i].iov_len = len;
3061         total_len += len;
3062     }
3063 
3064     unlock_user(target_vec, target_addr, 0);
3065     return vec;
3066 
3067  fail:
3068     while (--i >= 0) {
3069         if (tswapal(target_vec[i].iov_len) > 0) {
3070             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3071         }
3072     }
3073     unlock_user(target_vec, target_addr, 0);
3074  fail2:
3075     g_free(vec);
3076     errno = err;
3077     return NULL;
3078 }
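/*
 * Example of the partial-write behaviour in lock_iovec() (buffer addresses
 * are made up): for a guest writev() with three iovecs where only the second
 * one points at unmapped memory, entry 0 is locked normally, while the bad
 * entry and every entry after it are given a zero length, so the host
 * writev() performs a short write, mirroring Linux behaviour.  Only a bad
 * pointer in the very first entry makes the whole call fail with EFAULT.
 * A count of 0 returns NULL with errno cleared, and count > IOV_MAX fails
 * with EINVAL.
 */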
3079 
3080 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3081                          abi_ulong count, int copy)
3082 {
3083     struct target_iovec *target_vec;
3084     int i;
3085 
3086     target_vec = lock_user(VERIFY_READ, target_addr,
3087                            count * sizeof(struct target_iovec), 1);
3088     if (target_vec) {
3089         for (i = 0; i < count; i++) {
3090             abi_ulong base = tswapal(target_vec[i].iov_base);
3091             abi_long len = tswapal(target_vec[i].iov_len);
3092             if (len < 0) {
3093                 break;
3094             }
3095             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3096         }
3097         unlock_user(target_vec, target_addr, 0);
3098     }
3099 
3100     g_free(vec);
3101 }
3102 
3103 static inline int target_to_host_sock_type(int *type)
3104 {
3105     int host_type = 0;
3106     int target_type = *type;
3107 
3108     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3109     case TARGET_SOCK_DGRAM:
3110         host_type = SOCK_DGRAM;
3111         break;
3112     case TARGET_SOCK_STREAM:
3113         host_type = SOCK_STREAM;
3114         break;
3115     default:
3116         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3117         break;
3118     }
3119     if (target_type & TARGET_SOCK_CLOEXEC) {
3120 #if defined(SOCK_CLOEXEC)
3121         host_type |= SOCK_CLOEXEC;
3122 #else
3123         return -TARGET_EINVAL;
3124 #endif
3125     }
3126     if (target_type & TARGET_SOCK_NONBLOCK) {
3127 #if defined(SOCK_NONBLOCK)
3128         host_type |= SOCK_NONBLOCK;
3129 #elif !defined(O_NONBLOCK)
3130         return -TARGET_EINVAL;
3131 #endif
3132     }
3133     *type = host_type;
3134     return 0;
3135 }
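/*
 * For illustration: a guest socket(AF_INET, TARGET_SOCK_STREAM |
 * TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK, 0) becomes SOCK_STREAM |
 * SOCK_CLOEXEC | SOCK_NONBLOCK on hosts that define those flags.  If the
 * host lacks SOCK_NONBLOCK but has O_NONBLOCK, the flag is deliberately
 * left out here and applied later via fcntl() in sock_flags_fixup().
 */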
3136 
3137 /* Try to emulate socket type flags after socket creation.  */
3138 static int sock_flags_fixup(int fd, int target_type)
3139 {
3140 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3141     if (target_type & TARGET_SOCK_NONBLOCK) {
3142         int flags = fcntl(fd, F_GETFL);
3143         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3144             close(fd);
3145             return -TARGET_EINVAL;
3146         }
3147     }
3148 #endif
3149     return fd;
3150 }
3151 
3152 /* do_socket() Must return target values and target errnos. */
3153 static abi_long do_socket(int domain, int type, int protocol)
3154 {
3155     int target_type = type;
3156     int ret;
3157 
3158     ret = target_to_host_sock_type(&type);
3159     if (ret) {
3160         return ret;
3161     }
3162 
3163     if (domain == PF_NETLINK && !(
3164 #ifdef CONFIG_RTNETLINK
3165          protocol == NETLINK_ROUTE ||
3166 #endif
3167          protocol == NETLINK_KOBJECT_UEVENT ||
3168          protocol == NETLINK_AUDIT)) {
3169         return -TARGET_EPROTONOSUPPORT;
3170     }
3171 
3172     if (domain == AF_PACKET ||
3173         (domain == AF_INET && type == SOCK_PACKET)) {
3174         protocol = tswap16(protocol);
3175     }
3176 
3177     ret = get_errno(socket(domain, type, protocol));
3178     if (ret >= 0) {
3179         ret = sock_flags_fixup(ret, target_type);
3180         if (type == SOCK_PACKET) {
3181             /* Handle an obsolete case:
3182              * if the socket type is SOCK_PACKET, bind by name
3183              */
3184             fd_trans_register(ret, &target_packet_trans);
3185         } else if (domain == PF_NETLINK) {
3186             switch (protocol) {
3187 #ifdef CONFIG_RTNETLINK
3188             case NETLINK_ROUTE:
3189                 fd_trans_register(ret, &target_netlink_route_trans);
3190                 break;
3191 #endif
3192             case NETLINK_KOBJECT_UEVENT:
3193                 /* nothing to do: messages are strings */
3194                 break;
3195             case NETLINK_AUDIT:
3196                 fd_trans_register(ret, &target_netlink_audit_trans);
3197                 break;
3198             default:
3199                 g_assert_not_reached();
3200             }
3201         }
3202     }
3203     return ret;
3204 }
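/*
 * Note on the PF_NETLINK filter in do_socket() above: only the netlink
 * protocols that have (or need no) message translators registered below
 * are allowed -- NETLINK_ROUTE when CONFIG_RTNETLINK is set,
 * NETLINK_KOBJECT_UEVENT whose payloads are plain strings, and
 * NETLINK_AUDIT; anything else is rejected with EPROTONOSUPPORT.
 */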
3205 
3206 /* do_bind() Must return target values and target errnos. */
3207 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3208                         socklen_t addrlen)
3209 {
3210     void *addr;
3211     abi_long ret;
3212 
3213     if ((int)addrlen < 0) {
3214         return -TARGET_EINVAL;
3215     }
3216 
3217     addr = alloca(addrlen+1);
3218 
3219     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3220     if (ret)
3221         return ret;
3222 
3223     return get_errno(bind(sockfd, addr, addrlen));
3224 }
3225 
3226 /* do_connect() Must return target values and target errnos. */
3227 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3228                            socklen_t addrlen)
3229 {
3230     void *addr;
3231     abi_long ret;
3232 
3233     if ((int)addrlen < 0) {
3234         return -TARGET_EINVAL;
3235     }
3236 
3237     addr = alloca(addrlen+1);
3238 
3239     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3240     if (ret)
3241         return ret;
3242 
3243     return get_errno(safe_connect(sockfd, addr, addrlen));
3244 }
3245 
3246 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3247 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3248                                       int flags, int send)
3249 {
3250     abi_long ret, len;
3251     struct msghdr msg;
3252     abi_ulong count;
3253     struct iovec *vec;
3254     abi_ulong target_vec;
3255 
3256     if (msgp->msg_name) {
3257         msg.msg_namelen = tswap32(msgp->msg_namelen);
3258         msg.msg_name = alloca(msg.msg_namelen+1);
3259         ret = target_to_host_sockaddr(fd, msg.msg_name,
3260                                       tswapal(msgp->msg_name),
3261                                       msg.msg_namelen);
3262         if (ret == -TARGET_EFAULT) {
3263             /* For connected sockets msg_name and msg_namelen must
3264              * be ignored, so returning EFAULT immediately is wrong.
3265              * Instead, pass a bad msg_name to the host kernel, and
3266              * let it decide whether to return EFAULT or not.
3267              */
3268             msg.msg_name = (void *)-1;
3269         } else if (ret) {
3270             goto out2;
3271         }
3272     } else {
3273         msg.msg_name = NULL;
3274         msg.msg_namelen = 0;
3275     }
3276     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3277     msg.msg_control = alloca(msg.msg_controllen);
3278     memset(msg.msg_control, 0, msg.msg_controllen);
3279 
3280     msg.msg_flags = tswap32(msgp->msg_flags);
3281 
3282     count = tswapal(msgp->msg_iovlen);
3283     target_vec = tswapal(msgp->msg_iov);
3284 
3285     if (count > IOV_MAX) {
3286         /* sendmsg/recvmsg return a different errno for this condition than
3287          * readv/writev, so we must catch it here before lock_iovec() does.
3288          */
3289         ret = -TARGET_EMSGSIZE;
3290         goto out2;
3291     }
3292 
3293     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3294                      target_vec, count, send);
3295     if (vec == NULL) {
3296         ret = -host_to_target_errno(errno);
3297         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3298         if (!send || ret) {
3299             goto out2;
3300         }
3301     }
3302     msg.msg_iovlen = count;
3303     msg.msg_iov = vec;
3304 
3305     if (send) {
3306         if (fd_trans_target_to_host_data(fd)) {
3307             void *host_msg;
3308 
3309             host_msg = g_malloc(msg.msg_iov->iov_len);
3310             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3311             ret = fd_trans_target_to_host_data(fd)(host_msg,
3312                                                    msg.msg_iov->iov_len);
3313             if (ret >= 0) {
3314                 msg.msg_iov->iov_base = host_msg;
3315                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3316             }
3317             g_free(host_msg);
3318         } else {
3319             ret = target_to_host_cmsg(&msg, msgp);
3320             if (ret == 0) {
3321                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3322             }
3323         }
3324     } else {
3325         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3326         if (!is_error(ret)) {
3327             len = ret;
3328             if (fd_trans_host_to_target_data(fd)) {
3329                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3330                                                MIN(msg.msg_iov->iov_len, len));
3331             }
3332             if (!is_error(ret)) {
3333                 ret = host_to_target_cmsg(msgp, &msg);
3334             }
3335             if (!is_error(ret)) {
3336                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3337                 msgp->msg_flags = tswap32(msg.msg_flags);
3338                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3339                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3340                                     msg.msg_name, msg.msg_namelen);
3341                     if (ret) {
3342                         goto out;
3343                     }
3344                 }
3345 
3346                 ret = len;
3347             }
3348         }
3349     }
3350 
3351 out:
3352     if (vec) {
3353         unlock_iovec(vec, target_vec, count, !send);
3354     }
3355 out2:
3356     return ret;
3357 }
3358 
3359 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3360                                int flags, int send)
3361 {
3362     abi_long ret;
3363     struct target_msghdr *msgp;
3364 
3365     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3366                           msgp,
3367                           target_msg,
3368                           send ? 1 : 0)) {
3369         return -TARGET_EFAULT;
3370     }
3371     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3372     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3373     return ret;
3374 }
3375 
3376 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3377  * so it might not have this *mmsg-specific flag either.
3378  */
3379 #ifndef MSG_WAITFORONE
3380 #define MSG_WAITFORONE 0x10000
3381 #endif
3382 
3383 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3384                                 unsigned int vlen, unsigned int flags,
3385                                 int send)
3386 {
3387     struct target_mmsghdr *mmsgp;
3388     abi_long ret = 0;
3389     int i;
3390 
3391     if (vlen > UIO_MAXIOV) {
3392         vlen = UIO_MAXIOV;
3393     }
3394 
3395     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3396     if (!mmsgp) {
3397         return -TARGET_EFAULT;
3398     }
3399 
3400     for (i = 0; i < vlen; i++) {
3401         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3402         if (is_error(ret)) {
3403             break;
3404         }
3405         mmsgp[i].msg_len = tswap32(ret);
3406         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3407         if (flags & MSG_WAITFORONE) {
3408             flags |= MSG_DONTWAIT;
3409         }
3410     }
3411 
3412     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3413 
3414     /* Return the number of datagrams sent or received if we handled any;
3415      * otherwise return the error.
3416      */
3417     if (i) {
3418         return i;
3419     }
3420     return ret;
3421 }
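/*
 * Example of the return convention above (counts are illustrative): for a
 * sendmmsg() with vlen == 4 where the third safe_sendmsg() fails with
 * EAGAIN, msg_len is filled in for the first two messages and the call
 * returns 2; the error itself is not reported on this call.  With
 * MSG_WAITFORONE, MSG_DONTWAIT is OR-ed in after the first message so that
 * only the initial receive may block.
 */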
3422 
3423 /* do_accept4() Must return target values and target errnos. */
3424 static abi_long do_accept4(int fd, abi_ulong target_addr,
3425                            abi_ulong target_addrlen_addr, int flags)
3426 {
3427     socklen_t addrlen, ret_addrlen;
3428     void *addr;
3429     abi_long ret;
3430     int host_flags;
3431 
3432     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3433 
3434     if (target_addr == 0) {
3435         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3436     }
3437 
3438     /* Linux returns EFAULT if the addrlen pointer is invalid */
3439     if (get_user_u32(addrlen, target_addrlen_addr))
3440         return -TARGET_EFAULT;
3441 
3442     if ((int)addrlen < 0) {
3443         return -TARGET_EINVAL;
3444     }
3445 
3446     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3447         return -TARGET_EFAULT;
3448     }
3449 
3450     addr = alloca(addrlen);
3451 
3452     ret_addrlen = addrlen;
3453     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3454     if (!is_error(ret)) {
3455         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3456         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3457             ret = -TARGET_EFAULT;
3458         }
3459     }
3460     return ret;
3461 }
3462 
3463 /* do_getpeername() Must return target values and target errnos. */
3464 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3465                                abi_ulong target_addrlen_addr)
3466 {
3467     socklen_t addrlen, ret_addrlen;
3468     void *addr;
3469     abi_long ret;
3470 
3471     if (get_user_u32(addrlen, target_addrlen_addr))
3472         return -TARGET_EFAULT;
3473 
3474     if ((int)addrlen < 0) {
3475         return -TARGET_EINVAL;
3476     }
3477 
3478     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3479         return -TARGET_EFAULT;
3480     }
3481 
3482     addr = alloca(addrlen);
3483 
3484     ret_addrlen = addrlen;
3485     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3486     if (!is_error(ret)) {
3487         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3488         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3489             ret = -TARGET_EFAULT;
3490         }
3491     }
3492     return ret;
3493 }
3494 
3495 /* do_getsockname() Must return target values and target errnos. */
3496 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3497                                abi_ulong target_addrlen_addr)
3498 {
3499     socklen_t addrlen, ret_addrlen;
3500     void *addr;
3501     abi_long ret;
3502 
3503     if (get_user_u32(addrlen, target_addrlen_addr))
3504         return -TARGET_EFAULT;
3505 
3506     if ((int)addrlen < 0) {
3507         return -TARGET_EINVAL;
3508     }
3509 
3510     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3511         return -TARGET_EFAULT;
3512     }
3513 
3514     addr = alloca(addrlen);
3515 
3516     ret_addrlen = addrlen;
3517     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3518     if (!is_error(ret)) {
3519         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3520         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3521             ret = -TARGET_EFAULT;
3522         }
3523     }
3524     return ret;
3525 }
3526 
3527 /* do_socketpair() Must return target values and target errnos. */
3528 static abi_long do_socketpair(int domain, int type, int protocol,
3529                               abi_ulong target_tab_addr)
3530 {
3531     int tab[2];
3532     abi_long ret;
3533 
3534     target_to_host_sock_type(&type);
3535 
3536     ret = get_errno(socketpair(domain, type, protocol, tab));
3537     if (!is_error(ret)) {
3538         if (put_user_s32(tab[0], target_tab_addr)
3539             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3540             ret = -TARGET_EFAULT;
3541     }
3542     return ret;
3543 }
3544 
3545 /* do_sendto() Must return target values and target errnos. */
3546 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3547                           abi_ulong target_addr, socklen_t addrlen)
3548 {
3549     void *addr;
3550     void *host_msg;
3551     void *copy_msg = NULL;
3552     abi_long ret;
3553 
3554     if ((int)addrlen < 0) {
3555         return -TARGET_EINVAL;
3556     }
3557 
3558     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3559     if (!host_msg)
3560         return -TARGET_EFAULT;
3561     if (fd_trans_target_to_host_data(fd)) {
3562         copy_msg = host_msg;
3563         host_msg = g_malloc(len);
3564         memcpy(host_msg, copy_msg, len);
3565         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3566         if (ret < 0) {
3567             goto fail;
3568         }
3569     }
3570     if (target_addr) {
3571         addr = alloca(addrlen+1);
3572         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3573         if (ret) {
3574             goto fail;
3575         }
3576         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3577     } else {
3578         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3579     }
3580 fail:
3581     if (copy_msg) {
3582         g_free(host_msg);
3583         host_msg = copy_msg;
3584     }
3585     unlock_user(host_msg, msg, 0);
3586     return ret;
3587 }
3588 
3589 /* do_recvfrom() Must return target values and target errnos. */
3590 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3591                             abi_ulong target_addr,
3592                             abi_ulong target_addrlen)
3593 {
3594     socklen_t addrlen, ret_addrlen;
3595     void *addr;
3596     void *host_msg;
3597     abi_long ret;
3598 
3599     if (!msg) {
3600         host_msg = NULL;
3601     } else {
3602         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3603         if (!host_msg) {
3604             return -TARGET_EFAULT;
3605         }
3606     }
3607     if (target_addr) {
3608         if (get_user_u32(addrlen, target_addrlen)) {
3609             ret = -TARGET_EFAULT;
3610             goto fail;
3611         }
3612         if ((int)addrlen < 0) {
3613             ret = -TARGET_EINVAL;
3614             goto fail;
3615         }
3616         addr = alloca(addrlen);
3617         ret_addrlen = addrlen;
3618         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3619                                       addr, &ret_addrlen));
3620     } else {
3621         addr = NULL; /* To keep compiler quiet.  */
3622         addrlen = 0; /* To keep compiler quiet.  */
3623         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3624     }
3625     if (!is_error(ret)) {
3626         if (fd_trans_host_to_target_data(fd)) {
3627             abi_long trans;
3628             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3629             if (is_error(trans)) {
3630                 ret = trans;
3631                 goto fail;
3632             }
3633         }
3634         if (target_addr) {
3635             host_to_target_sockaddr(target_addr, addr,
3636                                     MIN(addrlen, ret_addrlen));
3637             if (put_user_u32(ret_addrlen, target_addrlen)) {
3638                 ret = -TARGET_EFAULT;
3639                 goto fail;
3640             }
3641         }
3642         unlock_user(host_msg, msg, len);
3643     } else {
3644 fail:
3645         unlock_user(host_msg, msg, 0);
3646     }
3647     return ret;
3648 }
3649 
3650 #ifdef TARGET_NR_socketcall
3651 /* do_socketcall() must return target values and target errnos. */
3652 static abi_long do_socketcall(int num, abi_ulong vptr)
3653 {
3654     static const unsigned nargs[] = { /* number of arguments per operation */
3655         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3656         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3657         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3658         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3659         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3660         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3661         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3662         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3663         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3664         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3665         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3666         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3667         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3668         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3669         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3670         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3671         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3672         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3673         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3674         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3675     };
3676     abi_long a[6]; /* max 6 args */
3677     unsigned i;
3678 
3679     /* check the range of the first argument num */
3680     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3681     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3682         return -TARGET_EINVAL;
3683     }
3684     /* ensure we have space for args */
3685     if (nargs[num] > ARRAY_SIZE(a)) {
3686         return -TARGET_EINVAL;
3687     }
3688     /* collect the arguments in a[] according to nargs[] */
3689     for (i = 0; i < nargs[num]; ++i) {
3690         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3691             return -TARGET_EFAULT;
3692         }
3693     }
3694     /* now when we have the args, invoke the appropriate underlying function */
3695     switch (num) {
3696     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3697         return do_socket(a[0], a[1], a[2]);
3698     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3699         return do_bind(a[0], a[1], a[2]);
3700     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3701         return do_connect(a[0], a[1], a[2]);
3702     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3703         return get_errno(listen(a[0], a[1]));
3704     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3705         return do_accept4(a[0], a[1], a[2], 0);
3706     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3707         return do_getsockname(a[0], a[1], a[2]);
3708     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3709         return do_getpeername(a[0], a[1], a[2]);
3710     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3711         return do_socketpair(a[0], a[1], a[2], a[3]);
3712     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3713         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3714     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3715         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3716     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3717         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3718     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3719         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3720     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3721         return get_errno(shutdown(a[0], a[1]));
3722     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3723         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3724     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3725         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3726     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3727         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3728     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3729         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3730     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3731         return do_accept4(a[0], a[1], a[2], a[3]);
3732     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3733         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3734     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3735         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3736     default:
3737         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3738         return -TARGET_EINVAL;
3739     }
3740 }
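/*
 * Worked example for do_socketcall() (argument values are hypothetical):
 * a guest doing socketcall(TARGET_SYS_SENDTO, vptr) supplies six abi_longs
 * at vptr -- sockfd, msg, len, flags, addr, addrlen.  nargs[] says six
 * arguments are needed, the loop fetches them with get_user_ual(), and the
 * call is forwarded to do_sendto(a[0], ..., a[5]).
 */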
3741 #endif
3742 
3743 #define N_SHM_REGIONS	32
3744 
3745 static struct shm_region {
3746     abi_ulong start;
3747     abi_ulong size;
3748     bool in_use;
3749 } shm_regions[N_SHM_REGIONS];
3750 
3751 #ifndef TARGET_SEMID64_DS
3752 /* asm-generic version of this struct */
3753 struct target_semid64_ds
3754 {
3755   struct target_ipc_perm sem_perm;
3756   abi_ulong sem_otime;
3757 #if TARGET_ABI_BITS == 32
3758   abi_ulong __unused1;
3759 #endif
3760   abi_ulong sem_ctime;
3761 #if TARGET_ABI_BITS == 32
3762   abi_ulong __unused2;
3763 #endif
3764   abi_ulong sem_nsems;
3765   abi_ulong __unused3;
3766   abi_ulong __unused4;
3767 };
3768 #endif
3769 
3770 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3771                                                abi_ulong target_addr)
3772 {
3773     struct target_ipc_perm *target_ip;
3774     struct target_semid64_ds *target_sd;
3775 
3776     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3777         return -TARGET_EFAULT;
3778     target_ip = &(target_sd->sem_perm);
3779     host_ip->__key = tswap32(target_ip->__key);
3780     host_ip->uid = tswap32(target_ip->uid);
3781     host_ip->gid = tswap32(target_ip->gid);
3782     host_ip->cuid = tswap32(target_ip->cuid);
3783     host_ip->cgid = tswap32(target_ip->cgid);
3784 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3785     host_ip->mode = tswap32(target_ip->mode);
3786 #else
3787     host_ip->mode = tswap16(target_ip->mode);
3788 #endif
3789 #if defined(TARGET_PPC)
3790     host_ip->__seq = tswap32(target_ip->__seq);
3791 #else
3792     host_ip->__seq = tswap16(target_ip->__seq);
3793 #endif
3794     unlock_user_struct(target_sd, target_addr, 0);
3795     return 0;
3796 }
3797 
3798 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3799                                                struct ipc_perm *host_ip)
3800 {
3801     struct target_ipc_perm *target_ip;
3802     struct target_semid64_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3805         return -TARGET_EFAULT;
3806     target_ip = &(target_sd->sem_perm);
3807     target_ip->__key = tswap32(host_ip->__key);
3808     target_ip->uid = tswap32(host_ip->uid);
3809     target_ip->gid = tswap32(host_ip->gid);
3810     target_ip->cuid = tswap32(host_ip->cuid);
3811     target_ip->cgid = tswap32(host_ip->cgid);
3812 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3813     target_ip->mode = tswap32(host_ip->mode);
3814 #else
3815     target_ip->mode = tswap16(host_ip->mode);
3816 #endif
3817 #if defined(TARGET_PPC)
3818     target_ip->__seq = tswap32(host_ip->__seq);
3819 #else
3820     target_ip->__seq = tswap16(host_ip->__seq);
3821 #endif
3822     unlock_user_struct(target_sd, target_addr, 1);
3823     return 0;
3824 }
3825 
3826 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3827                                                abi_ulong target_addr)
3828 {
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3832         return -TARGET_EFAULT;
3833     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3834         return -TARGET_EFAULT;
3835     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3836     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3837     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3838     unlock_user_struct(target_sd, target_addr, 0);
3839     return 0;
3840 }
3841 
3842 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3843                                                struct semid_ds *host_sd)
3844 {
3845     struct target_semid64_ds *target_sd;
3846 
3847     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3848         return -TARGET_EFAULT;
3849     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3850         return -TARGET_EFAULT;
3851     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3852     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3853     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3854     unlock_user_struct(target_sd, target_addr, 1);
3855     return 0;
3856 }
3857 
3858 struct target_seminfo {
3859     int semmap;
3860     int semmni;
3861     int semmns;
3862     int semmnu;
3863     int semmsl;
3864     int semopm;
3865     int semume;
3866     int semusz;
3867     int semvmx;
3868     int semaem;
3869 };
3870 
3871 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3872                                               struct seminfo *host_seminfo)
3873 {
3874     struct target_seminfo *target_seminfo;
3875     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3878     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3879     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3880     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3881     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3882     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3883     __put_user(host_seminfo->semume, &target_seminfo->semume);
3884     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3885     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3886     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3887     unlock_user_struct(target_seminfo, target_addr, 1);
3888     return 0;
3889 }
3890 
3891 union semun {
3892     int val;
3893     struct semid_ds *buf;
3894     unsigned short *array;
3895     struct seminfo *__buf;
3896 };
3897 
3898 union target_semun {
3899     int val;
3900     abi_ulong buf;
3901     abi_ulong array;
3902     abi_ulong __buf;
3903 };
3904 
3905 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3906                                                abi_ulong target_addr)
3907 {
3908     int nsems;
3909     unsigned short *array;
3910     union semun semun;
3911     struct semid_ds semid_ds;
3912     int i, ret;
3913 
3914     semun.buf = &semid_ds;
3915 
3916     ret = semctl(semid, 0, IPC_STAT, semun);
3917     if (ret == -1)
3918         return get_errno(ret);
3919 
3920     nsems = semid_ds.sem_nsems;
3921 
3922     *host_array = g_try_new(unsigned short, nsems);
3923     if (!*host_array) {
3924         return -TARGET_ENOMEM;
3925     }
3926     array = lock_user(VERIFY_READ, target_addr,
3927                       nsems*sizeof(unsigned short), 1);
3928     if (!array) {
3929         g_free(*host_array);
3930         return -TARGET_EFAULT;
3931     }
3932 
3933     for (i = 0; i < nsems; i++) {
3934         __get_user((*host_array)[i], &array[i]);
3935     }
3936     unlock_user(array, target_addr, 0);
3937 
3938     return 0;
3939 }
3940 
3941 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3942                                                unsigned short **host_array)
3943 {
3944     int nsems;
3945     unsigned short *array;
3946     union semun semun;
3947     struct semid_ds semid_ds;
3948     int i, ret;
3949 
3950     semun.buf = &semid_ds;
3951 
3952     ret = semctl(semid, 0, IPC_STAT, semun);
3953     if (ret == -1)
3954         return get_errno(ret);
3955 
3956     nsems = semid_ds.sem_nsems;
3957 
3958     array = lock_user(VERIFY_WRITE, target_addr,
3959                       nsems*sizeof(unsigned short), 0);
3960     if (!array)
3961         return -TARGET_EFAULT;
3962 
3963     for (i = 0; i < nsems; i++) {
3964         __put_user((*host_array)[i], &array[i]);
3965     }
3966     g_free(*host_array);
3967     unlock_user(array, target_addr, 1);
3968 
3969     return 0;
3970 }
3971 
3972 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3973                                  abi_ulong target_arg)
3974 {
3975     union target_semun target_su = { .buf = target_arg };
3976     union semun arg;
3977     struct semid_ds dsarg;
3978     unsigned short *array = NULL;
3979     struct seminfo seminfo;
3980     abi_long ret = -TARGET_EINVAL;
3981     abi_long err;
3982     cmd &= 0xff;
3983 
3984     switch (cmd) {
3985         case GETVAL:
3986         case SETVAL:
3987             /* In 64-bit cross-endian situations, we will erroneously pick up
3988              * the wrong half of the union for the "val" element.  To rectify
3989              * this, the entire 8-byte structure is byteswapped, followed by
3990              * a swap of the 4-byte val field. In other cases, the data is
3991              * already in proper host byte order. */
3992             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3993                 target_su.buf = tswapal(target_su.buf);
3994                 arg.val = tswap32(target_su.val);
3995             } else {
3996                 arg.val = target_su.val;
3997             }
3998             ret = get_errno(semctl(semid, semnum, cmd, arg));
3999             break;
4000         case GETALL:
4001         case SETALL:
4002             err = target_to_host_semarray(semid, &array, target_su.array);
4003             if (err)
4004                 return err;
4005             arg.array = array;
4006             ret = get_errno(semctl(semid, semnum, cmd, arg));
4007             err = host_to_target_semarray(semid, target_su.array, &array);
4008             if (err)
4009                 return err;
4010             break;
4011         case IPC_STAT:
4012         case IPC_SET:
4013         case SEM_STAT:
4014             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4015             if (err)
4016                 return err;
4017             arg.buf = &dsarg;
4018             ret = get_errno(semctl(semid, semnum, cmd, arg));
4019             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4020             if (err)
4021                 return err;
4022             break;
4023         case IPC_INFO:
4024         case SEM_INFO:
4025             arg.__buf = &seminfo;
4026             ret = get_errno(semctl(semid, semnum, cmd, arg));
4027             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4028             if (err)
4029                 return err;
4030             break;
4031         case IPC_RMID:
4032         case GETPID:
4033         case GETNCNT:
4034         case GETZCNT:
4035             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4036             break;
4037     }
4038 
4039     return ret;
4040 }
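/*
 * Sketch of the GETALL/SETALL path above (semaphore counts are
 * illustrative): for a set with nsems == 3, target_to_host_semarray()
 * first queries nsems via IPC_STAT, copies three unsigned shorts from
 * guest memory into a freshly allocated host array, semctl() operates on
 * that array, and host_to_target_semarray() copies the (possibly updated)
 * values back to the guest and frees the host copy.
 */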
4041 
4042 struct target_sembuf {
4043     unsigned short sem_num;
4044     short sem_op;
4045     short sem_flg;
4046 };
4047 
4048 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4049                                              abi_ulong target_addr,
4050                                              unsigned nsops)
4051 {
4052     struct target_sembuf *target_sembuf;
4053     int i;
4054 
4055     target_sembuf = lock_user(VERIFY_READ, target_addr,
4056                               nsops*sizeof(struct target_sembuf), 1);
4057     if (!target_sembuf)
4058         return -TARGET_EFAULT;
4059 
4060     for (i = 0; i < nsops; i++) {
4061         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4062         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4063         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4064     }
4065 
4066     unlock_user(target_sembuf, target_addr, 0);
4067 
4068     return 0;
4069 }
4070 
4071 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4072     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4073 
4074 /*
4075  * This macro is required to handle the s390 variants, which pass the
4076  * arguments in a different order than the default.
4077  */
4078 #ifdef __s390x__
4079 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4080   (__nsops), (__timeout), (__sops)
4081 #else
4082 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4083   (__nsops), 0, (__sops), (__timeout)
4084 #endif
4085 
4086 static inline abi_long do_semtimedop(int semid,
4087                                      abi_long ptr,
4088                                      unsigned nsops,
4089                                      abi_long timeout, bool time64)
4090 {
4091     struct sembuf *sops;
4092     struct timespec ts, *pts = NULL;
4093     abi_long ret;
4094 
4095     if (timeout) {
4096         pts = &ts;
4097         if (time64) {
4098             if (target_to_host_timespec64(pts, timeout)) {
4099                 return -TARGET_EFAULT;
4100             }
4101         } else {
4102             if (target_to_host_timespec(pts, timeout)) {
4103                 return -TARGET_EFAULT;
4104             }
4105         }
4106     }
4107 
4108     if (nsops > TARGET_SEMOPM) {
4109         return -TARGET_E2BIG;
4110     }
4111 
4112     sops = g_new(struct sembuf, nsops);
4113 
4114     if (target_to_host_sembuf(sops, ptr, nsops)) {
4115         g_free(sops);
4116         return -TARGET_EFAULT;
4117     }
4118 
4119     ret = -TARGET_ENOSYS;
4120 #ifdef __NR_semtimedop
4121     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4122 #endif
4123 #ifdef __NR_ipc
4124     if (ret == -TARGET_ENOSYS) {
4125         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4126                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4127     }
4128 #endif
4129     g_free(sops);
4130     return ret;
4131 }
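/*
 * How SEMTIMEDOP_IPC_ARGS expands in the safe_ipc() fallback above: on the
 * default variant the call becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 * while on s390x, whose sys_ipc takes only five parameters, it becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 */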
4132 #endif
4133 
4134 struct target_msqid_ds
4135 {
4136     struct target_ipc_perm msg_perm;
4137     abi_ulong msg_stime;
4138 #if TARGET_ABI_BITS == 32
4139     abi_ulong __unused1;
4140 #endif
4141     abi_ulong msg_rtime;
4142 #if TARGET_ABI_BITS == 32
4143     abi_ulong __unused2;
4144 #endif
4145     abi_ulong msg_ctime;
4146 #if TARGET_ABI_BITS == 32
4147     abi_ulong __unused3;
4148 #endif
4149     abi_ulong __msg_cbytes;
4150     abi_ulong msg_qnum;
4151     abi_ulong msg_qbytes;
4152     abi_ulong msg_lspid;
4153     abi_ulong msg_lrpid;
4154     abi_ulong __unused4;
4155     abi_ulong __unused5;
4156 };
4157 
4158 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4159                                                abi_ulong target_addr)
4160 {
4161     struct target_msqid_ds *target_md;
4162 
4163     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4164         return -TARGET_EFAULT;
4165     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4166         return -TARGET_EFAULT;
4167     host_md->msg_stime = tswapal(target_md->msg_stime);
4168     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4169     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4170     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4171     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4172     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4173     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4174     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4175     unlock_user_struct(target_md, target_addr, 0);
4176     return 0;
4177 }
4178 
4179 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4180                                                struct msqid_ds *host_md)
4181 {
4182     struct target_msqid_ds *target_md;
4183 
4184     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4185         return -TARGET_EFAULT;
4186     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4187         return -TARGET_EFAULT;
4188     target_md->msg_stime = tswapal(host_md->msg_stime);
4189     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4190     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4191     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4192     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4193     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4194     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4195     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4196     unlock_user_struct(target_md, target_addr, 1);
4197     return 0;
4198 }
4199 
4200 struct target_msginfo {
4201     int msgpool;
4202     int msgmap;
4203     int msgmax;
4204     int msgmnb;
4205     int msgmni;
4206     int msgssz;
4207     int msgtql;
4208     unsigned short int msgseg;
4209 };
4210 
4211 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4212                                               struct msginfo *host_msginfo)
4213 {
4214     struct target_msginfo *target_msginfo;
4215     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4216         return -TARGET_EFAULT;
4217     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4218     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4219     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4220     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4221     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4222     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4223     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4224     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4225     unlock_user_struct(target_msginfo, target_addr, 1);
4226     return 0;
4227 }
4228 
4229 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4230 {
4231     struct msqid_ds dsarg;
4232     struct msginfo msginfo;
4233     abi_long ret = -TARGET_EINVAL;
4234 
4235     cmd &= 0xff;
4236 
4237     switch (cmd) {
4238     case IPC_STAT:
4239     case IPC_SET:
4240     case MSG_STAT:
4241         if (target_to_host_msqid_ds(&dsarg,ptr))
4242             return -TARGET_EFAULT;
4243         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4244         if (host_to_target_msqid_ds(ptr,&dsarg))
4245             return -TARGET_EFAULT;
4246         break;
4247     case IPC_RMID:
4248         ret = get_errno(msgctl(msgid, cmd, NULL));
4249         break;
4250     case IPC_INFO:
4251     case MSG_INFO:
4252         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4253         if (host_to_target_msginfo(ptr, &msginfo))
4254             return -TARGET_EFAULT;
4255         break;
4256     }
4257 
4258     return ret;
4259 }
4260 
4261 struct target_msgbuf {
4262     abi_long mtype;
4263     char mtext[1];
4264 };
4265 
4266 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4267                                  ssize_t msgsz, int msgflg)
4268 {
4269     struct target_msgbuf *target_mb;
4270     struct msgbuf *host_mb;
4271     abi_long ret = 0;
4272 
4273     if (msgsz < 0) {
4274         return -TARGET_EINVAL;
4275     }
4276 
4277     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4278         return -TARGET_EFAULT;
4279     host_mb = g_try_malloc(msgsz + sizeof(long));
4280     if (!host_mb) {
4281         unlock_user_struct(target_mb, msgp, 0);
4282         return -TARGET_ENOMEM;
4283     }
4284     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4285     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4286     ret = -TARGET_ENOSYS;
4287 #ifdef __NR_msgsnd
4288     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4289 #endif
4290 #ifdef __NR_ipc
4291     if (ret == -TARGET_ENOSYS) {
4292 #ifdef __s390x__
4293         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4294                                  host_mb));
4295 #else
4296         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4297                                  host_mb, 0));
4298 #endif
4299     }
4300 #endif
4301     g_free(host_mb);
4302     unlock_user_struct(target_mb, msgp, 0);
4303 
4304     return ret;
4305 }
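/*
 * Layout note for do_msgsnd()/do_msgrcv(): the guest message starts with an
 * abi_long mtype followed by mtext, while the host struct msgbuf starts with
 * a long mtype.  That is why the host buffer is sized msgsz + sizeof(long)
 * and mtype is converted with tswapal() while mtext is copied byte-for-byte.
 * E.g. (sizes illustrative) a 16-byte message on an LP64 host needs a
 * 24-byte host buffer.
 */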
4306 
4307 #ifdef __NR_ipc
4308 #if defined(__sparc__)
4309 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4310 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4311 #elif defined(__s390x__)
4312 /* The s390 sys_ipc variant has only five parameters.  */
4313 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4314     ((long int[]){(long int)__msgp, __msgtyp})
4315 #else
4316 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4317     ((long int[]){(long int)__msgp, __msgtyp}), 0
4318 #endif
4319 #endif
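/*
 * The generic sys_ipc MSGRCV calling convention passes a pointer to a
 * { msgp, msgtyp } "kludge" pair instead of the two values; the MSGRCV_ARGS
 * macro builds that pair as a compound-literal array for the safe_ipc()
 * fallback in do_msgrcv() below.  SPARC passes msgp and msgtyp directly,
 * and the five-parameter s390 variant drops the trailing 0.
 */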
4320 
4321 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4322                                  ssize_t msgsz, abi_long msgtyp,
4323                                  int msgflg)
4324 {
4325     struct target_msgbuf *target_mb;
4326     char *target_mtext;
4327     struct msgbuf *host_mb;
4328     abi_long ret = 0;
4329 
4330     if (msgsz < 0) {
4331         return -TARGET_EINVAL;
4332     }
4333 
4334     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4335         return -TARGET_EFAULT;
4336 
4337     host_mb = g_try_malloc(msgsz + sizeof(long));
4338     if (!host_mb) {
4339         ret = -TARGET_ENOMEM;
4340         goto end;
4341     }
4342     ret = -TARGET_ENOSYS;
4343 #ifdef __NR_msgrcv
4344     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4345 #endif
4346 #ifdef __NR_ipc
4347     if (ret == -TARGET_ENOSYS) {
4348         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4349                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4350     }
4351 #endif
4352 
4353     if (ret > 0) {
4354         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4355         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4356         if (!target_mtext) {
4357             ret = -TARGET_EFAULT;
4358             goto end;
4359         }
4360         memcpy(target_mb->mtext, host_mb->mtext, ret);
4361         unlock_user(target_mtext, target_mtext_addr, ret);
4362     }
4363 
4364     target_mb->mtype = tswapal(host_mb->mtype);
4365 
4366 end:
4367     if (target_mb)
4368         unlock_user_struct(target_mb, msgp, 1);
4369     g_free(host_mb);
4370     return ret;
4371 }
4372 
4373 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4374                                                abi_ulong target_addr)
4375 {
4376     struct target_shmid_ds *target_sd;
4377 
4378     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4379         return -TARGET_EFAULT;
4380     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4381         return -TARGET_EFAULT;
4382     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4383     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4384     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4385     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4386     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4387     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4388     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4389     unlock_user_struct(target_sd, target_addr, 0);
4390     return 0;
4391 }
4392 
4393 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4394                                                struct shmid_ds *host_sd)
4395 {
4396     struct target_shmid_ds *target_sd;
4397 
4398     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4399         return -TARGET_EFAULT;
4400     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4401         return -TARGET_EFAULT;
4402     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4403     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4404     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4405     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4406     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4407     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4408     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4409     unlock_user_struct(target_sd, target_addr, 1);
4410     return 0;
4411 }
4412 
4413 struct  target_shminfo {
4414     abi_ulong shmmax;
4415     abi_ulong shmmin;
4416     abi_ulong shmmni;
4417     abi_ulong shmseg;
4418     abi_ulong shmall;
4419 };
4420 
4421 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4422                                               struct shminfo *host_shminfo)
4423 {
4424     struct target_shminfo *target_shminfo;
4425     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4426         return -TARGET_EFAULT;
4427     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4428     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4429     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4430     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4431     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4432     unlock_user_struct(target_shminfo, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct target_shm_info {
4437     int used_ids;
4438     abi_ulong shm_tot;
4439     abi_ulong shm_rss;
4440     abi_ulong shm_swp;
4441     abi_ulong swap_attempts;
4442     abi_ulong swap_successes;
4443 };
4444 
4445 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4446                                                struct shm_info *host_shm_info)
4447 {
4448     struct target_shm_info *target_shm_info;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4452     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4453     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4454     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4455     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4456     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4457     unlock_user_struct(target_shm_info, target_addr, 1);
4458     return 0;
4459 }
4460 
4461 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4462 {
4463     struct shmid_ds dsarg;
4464     struct shminfo shminfo;
4465     struct shm_info shm_info;
4466     abi_long ret = -TARGET_EINVAL;
4467 
4468     cmd &= 0xff;
4469 
4470     switch(cmd) {
4471     case IPC_STAT:
4472     case IPC_SET:
4473     case SHM_STAT:
4474         if (target_to_host_shmid_ds(&dsarg, buf))
4475             return -TARGET_EFAULT;
4476         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4477         if (host_to_target_shmid_ds(buf, &dsarg))
4478             return -TARGET_EFAULT;
4479         break;
4480     case IPC_INFO:
4481         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4482         if (host_to_target_shminfo(buf, &shminfo))
4483             return -TARGET_EFAULT;
4484         break;
4485     case SHM_INFO:
4486         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4487         if (host_to_target_shm_info(buf, &shm_info))
4488             return -TARGET_EFAULT;
4489         break;
4490     case IPC_RMID:
4491     case SHM_LOCK:
4492     case SHM_UNLOCK:
4493         ret = get_errno(shmctl(shmid, cmd, NULL));
4494         break;
4495     }
4496 
4497     return ret;
4498 }
4499 
4500 #ifndef TARGET_FORCE_SHMLBA
4501 /* For most architectures, SHMLBA is the same as the page size;
4502  * some architectures have larger values, in which case they should
4503  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4504  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4505  * and defining its own value for SHMLBA.
4506  *
4507  * The kernel also permits SHMLBA to be set by the architecture to a
4508  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4509  * this means that addresses are rounded to the large size if
4510  * SHM_RND is set but addresses not aligned to that size are not rejected
4511  * as long as they are at least page-aligned. Since the only architecture
4512  * which uses this is ia64, this code doesn't provide for that oddity.
4513  */
4514 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4515 {
4516     return TARGET_PAGE_SIZE;
4517 }
4518 #endif
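/*
 * Example of how the SHMLBA value is used in do_shmat() below (addresses
 * are made up): with a 4 KiB shmlba, a guest shmat() at 0x40001234 is
 * rounded down to 0x40001000 when SHM_RND is set and rejected with EINVAL
 * otherwise.  When shmaddr is 0, a free region is picked with
 * mmap_find_vma() using the larger of the host SHMLBA and the target one.
 */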
4519 
4520 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4521                                  int shmid, abi_ulong shmaddr, int shmflg)
4522 {
4523     CPUState *cpu = env_cpu(cpu_env);
4524     abi_long raddr;
4525     void *host_raddr;
4526     struct shmid_ds shm_info;
4527     int i, ret;
4528     abi_ulong shmlba;
4529 
4530     /* shmat pointers are always untagged */
4531 
4532     /* find out the length of the shared memory segment */
4533     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4534     if (is_error(ret)) {
4535         /* can't get length, bail out */
4536         return ret;
4537     }
4538 
4539     shmlba = target_shmlba(cpu_env);
4540 
4541     if (shmaddr & (shmlba - 1)) {
4542         if (shmflg & SHM_RND) {
4543             shmaddr &= ~(shmlba - 1);
4544         } else {
4545             return -TARGET_EINVAL;
4546         }
4547     }
4548     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4549         return -TARGET_EINVAL;
4550     }
4551 
4552     mmap_lock();
4553 
4554     /*
4555      * We're mapping shared memory, so ensure we generate code for parallel
4556      * execution and flush old translations.  This will work up to the level
4557      * supported by the host -- anything that requires EXCP_ATOMIC will not
4558      * be atomic with respect to an external process.
4559      */
4560     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4561         cpu->tcg_cflags |= CF_PARALLEL;
4562         tb_flush(cpu);
4563     }
4564 
4565     if (shmaddr)
4566         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4567     else {
4568         abi_ulong mmap_start;
4569 
4570         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4571         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4572 
4573         if (mmap_start == -1) {
4574             errno = ENOMEM;
4575             host_raddr = (void *)-1;
4576         } else
4577             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4578                                shmflg | SHM_REMAP);
4579     }
4580 
4581     if (host_raddr == (void *)-1) {
4582         mmap_unlock();
4583         return get_errno((long)host_raddr);
4584     }
4585     raddr = h2g((unsigned long)host_raddr);
4586 
4587     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4588                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4589                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4590 
4591     for (i = 0; i < N_SHM_REGIONS; i++) {
4592         if (!shm_regions[i].in_use) {
4593             shm_regions[i].in_use = true;
4594             shm_regions[i].start = raddr;
4595             shm_regions[i].size = shm_info.shm_segsz;
4596             break;
4597         }
4598     }
4599 
4600     mmap_unlock();
4601     return raddr;
4602 
4603 }
4604 
4605 static inline abi_long do_shmdt(abi_ulong shmaddr)
4606 {
4607     int i;
4608     abi_long rv;
4609 
4610     /* shmdt pointers are always untagged */
4611 
4612     mmap_lock();
4613 
4614     for (i = 0; i < N_SHM_REGIONS; ++i) {
4615         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4616             shm_regions[i].in_use = false;
4617             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4618             break;
4619         }
4620     }
4621     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4622 
4623     mmap_unlock();
4624 
4625     return rv;
4626 }
4627 
4628 #ifdef TARGET_NR_ipc
4629 /* ??? This only works with linear mappings.  */
4630 /* do_ipc() must return target values and target errnos. */
4631 static abi_long do_ipc(CPUArchState *cpu_env,
4632                        unsigned int call, abi_long first,
4633                        abi_long second, abi_long third,
4634                        abi_long ptr, abi_long fifth)
4635 {
4636     int version;
4637     abi_long ret = 0;
4638 
4639     version = call >> 16;
4640     call &= 0xffff;
4641 
4642     switch (call) {
4643     case IPCOP_semop:
4644         ret = do_semtimedop(first, ptr, second, 0, false);
4645         break;
4646     case IPCOP_semtimedop:
4647     /*
4648      * The s390 sys_ipc variant has only five parameters instead of six
4649      * (as in the default variant); the only difference is the handling of
4650      * SEMTIMEDOP, where s390 passes the pointer to the struct timespec in
4651      * the third parameter while the generic variant uses the fifth.
4652      */
4653 #if defined(TARGET_S390X)
4654         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4655 #else
4656         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4657 #endif
4658         break;
4659 
4660     case IPCOP_semget:
4661         ret = get_errno(semget(first, second, third));
4662         break;
4663 
4664     case IPCOP_semctl: {
4665         /* The semun argument to semctl is passed by value, so dereference the
4666          * ptr argument. */
4667         abi_ulong atptr;
4668         get_user_ual(atptr, ptr);
4669         ret = do_semctl(first, second, third, atptr);
4670         break;
4671     }
4672 
4673     case IPCOP_msgget:
4674         ret = get_errno(msgget(first, second));
4675         break;
4676 
4677     case IPCOP_msgsnd:
4678         ret = do_msgsnd(first, ptr, second, third);
4679         break;
4680 
4681     case IPCOP_msgctl:
4682         ret = do_msgctl(first, second, ptr);
4683         break;
4684 
4685     case IPCOP_msgrcv:
4686         switch (version) {
4687         case 0:
4688             {
4689                 struct target_ipc_kludge {
4690                     abi_long msgp;
4691                     abi_long msgtyp;
4692                 } *tmp;
4693 
4694                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4695                     ret = -TARGET_EFAULT;
4696                     break;
4697                 }
4698 
4699                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4700 
4701                 unlock_user_struct(tmp, ptr, 0);
4702                 break;
4703             }
4704         default:
4705             ret = do_msgrcv(first, ptr, second, fifth, third);
4706         }
4707         break;
4708 
4709     case IPCOP_shmat:
4710         switch (version) {
4711         default:
4712         {
4713             abi_ulong raddr;
4714             raddr = do_shmat(cpu_env, first, ptr, second);
4715             if (is_error(raddr))
4716                 return get_errno(raddr);
4717             if (put_user_ual(raddr, third))
4718                 return -TARGET_EFAULT;
4719             break;
4720         }
4721         case 1:
4722             ret = -TARGET_EINVAL;
4723             break;
4724         }
4725 	break;
4726     case IPCOP_shmdt:
4727         ret = do_shmdt(ptr);
4728 	break;
4729 
4730     case IPCOP_shmget:
4731 	/* IPC_* flag values are the same on all linux platforms */
4732 	ret = get_errno(shmget(first, second, third));
4733 	break;
4734 
4735 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4736     case IPCOP_shmctl:
4737         ret = do_shmctl(first, second, ptr);
4738         break;
4739     default:
4740         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4741                       call, version);
4742 	ret = -TARGET_ENOSYS;
4743 	break;
4744     }
4745     return ret;
4746 }
4747 #endif
4748 
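/*
 * A small sketch of the multiplexed ipc(2) call-number encoding that
 * do_ipc() unpacks above: the low 16 bits select the operation and the
 * high 16 bits carry the IPC interface "version".  The numbers below are
 * only for illustration.
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         unsigned int call = (1u << 16) | 21;   // hypothetical encoded value
 *         unsigned int version = call >> 16;     // -> 1
 *         unsigned int op = call & 0xffff;       // -> 21
 *
 *         printf("op=%u version=%u\n", op, version);
 *         return 0;
 *     }
 */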
4749 /* kernel structure types definitions */
4750 
4751 #define STRUCT(name, ...) STRUCT_ ## name,
4752 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4753 enum {
4754 #include "syscall_types.h"
4755 STRUCT_MAX
4756 };
4757 #undef STRUCT
4758 #undef STRUCT_SPECIAL
4759 
4760 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4761 #define STRUCT_SPECIAL(name)
4762 #include "syscall_types.h"
4763 #undef STRUCT
4764 #undef STRUCT_SPECIAL
4765 
4766 #define MAX_STRUCT_SIZE 4096
4767 
4768 #ifdef CONFIG_FIEMAP
4769 /* Cap the extent count so fiemap access checks don't overflow on 32-bit
4770  * systems.  This is very slightly smaller than the limit imposed by
4771  * the underlying kernel.
4772  */
4773 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4774                             / sizeof(struct fiemap_extent))
4775 
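/*
 * A worked check of why this bound is safe: do_ioctl_fs_ioc_fiemap() below
 * computes outbufsz = sizeof(struct fiemap) + count * sizeof(struct
 * fiemap_extent) in a uint32_t, so keeping count <= FIEMAP_MAX_EXTENTS
 * guarantees the header plus the extent array still fits in UINT_MAX
 * without wrapping.
 *
 *     #include <stdio.h>
 *     #include <limits.h>
 *     #include <linux/fiemap.h>
 *
 *     #define MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
 *                          / sizeof(struct fiemap_extent))
 *
 *     int main(void)
 *     {
 *         unsigned long long worst = sizeof(struct fiemap) +
 *             (unsigned long long)MAX_EXTENTS * sizeof(struct fiemap_extent);
 *
 *         printf("max extents       : %zu\n", (size_t)MAX_EXTENTS);
 *         printf("worst-case buffer : %llu <= %u\n", worst, UINT_MAX);
 *         return 0;
 *     }
 */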
4776 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4777                                        int fd, int cmd, abi_long arg)
4778 {
4779     /* The parameter for this ioctl is a struct fiemap followed
4780      * by an array of struct fiemap_extent whose element count is
4781      * given by fiemap->fm_extent_count. The array is filled in by
4782      * the ioctl.
4783      */
4784     int target_size_in, target_size_out;
4785     struct fiemap *fm;
4786     const argtype *arg_type = ie->arg_type;
4787     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4788     void *argptr, *p;
4789     abi_long ret;
4790     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4791     uint32_t outbufsz;
4792     int free_fm = 0;
4793 
4794     assert(arg_type[0] == TYPE_PTR);
4795     assert(ie->access == IOC_RW);
4796     arg_type++;
4797     target_size_in = thunk_type_size(arg_type, 0);
4798     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4799     if (!argptr) {
4800         return -TARGET_EFAULT;
4801     }
4802     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4803     unlock_user(argptr, arg, 0);
4804     fm = (struct fiemap *)buf_temp;
4805     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4806         return -TARGET_EINVAL;
4807     }
4808 
4809     outbufsz = sizeof (*fm) +
4810         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4811 
4812     if (outbufsz > MAX_STRUCT_SIZE) {
4813         /* We can't fit all the extents into the fixed size buffer.
4814          * Allocate one that is large enough and use it instead.
4815          */
4816         fm = g_try_malloc(outbufsz);
4817         if (!fm) {
4818             return -TARGET_ENOMEM;
4819         }
4820         memcpy(fm, buf_temp, sizeof(struct fiemap));
4821         free_fm = 1;
4822     }
4823     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4824     if (!is_error(ret)) {
4825         target_size_out = target_size_in;
4826         /* An extent_count of 0 means we were only counting the extents,
4827          * so there are no structs to copy.
4828          */
4829         if (fm->fm_extent_count != 0) {
4830             target_size_out += fm->fm_mapped_extents * extent_size;
4831         }
4832         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4833         if (!argptr) {
4834             ret = -TARGET_EFAULT;
4835         } else {
4836             /* Convert the struct fiemap */
4837             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4838             if (fm->fm_extent_count != 0) {
4839                 p = argptr + target_size_in;
4840                 /* ...and then all the struct fiemap_extents */
4841                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4842                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4843                                   THUNK_TARGET);
4844                     p += extent_size;
4845                 }
4846             }
4847             unlock_user(argptr, arg, target_size_out);
4848         }
4849     }
4850     if (free_fm) {
4851         g_free(fm);
4852     }
4853     return ret;
4854 }
4855 #endif
4856 
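/*
 * For context, a sketch of how a guest application typically drives the
 * FS_IOC_FIEMAP ioctl emulated above: first call with fm_extent_count == 0
 * to learn how many extents exist, then allocate the variable-length buffer
 * and call again to have fm_extents[] filled in.  Error handling is trimmed.
 *
 *     #include <stdlib.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/fs.h>
 *     #include <linux/fiemap.h>
 *
 *     static struct fiemap *get_extents(int fd)
 *     {
 *         struct fiemap probe;
 *         struct fiemap *fm;
 *
 *         memset(&probe, 0, sizeof(probe));
 *         probe.fm_length = FIEMAP_MAX_OFFSET;   // map the whole file
 *         ioctl(fd, FS_IOC_FIEMAP, &probe);      // counting pass only
 *
 *         fm = calloc(1, sizeof(*fm) +
 *                     probe.fm_mapped_extents * sizeof(struct fiemap_extent));
 *         fm->fm_length = FIEMAP_MAX_OFFSET;
 *         fm->fm_extent_count = probe.fm_mapped_extents;
 *         ioctl(fd, FS_IOC_FIEMAP, fm);          // now fills fm->fm_extents[]
 *         return fm;
 *     }
 */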
4857 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4858                                 int fd, int cmd, abi_long arg)
4859 {
4860     const argtype *arg_type = ie->arg_type;
4861     int target_size;
4862     void *argptr;
4863     int ret;
4864     struct ifconf *host_ifconf;
4865     uint32_t outbufsz;
4866     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4867     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4868     int target_ifreq_size;
4869     int nb_ifreq;
4870     int free_buf = 0;
4871     int i;
4872     int target_ifc_len;
4873     abi_long target_ifc_buf;
4874     int host_ifc_len;
4875     char *host_ifc_buf;
4876 
4877     assert(arg_type[0] == TYPE_PTR);
4878     assert(ie->access == IOC_RW);
4879 
4880     arg_type++;
4881     target_size = thunk_type_size(arg_type, 0);
4882 
4883     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4884     if (!argptr)
4885         return -TARGET_EFAULT;
4886     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4887     unlock_user(argptr, arg, 0);
4888 
4889     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4890     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4891     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4892 
4893     if (target_ifc_buf != 0) {
4894         target_ifc_len = host_ifconf->ifc_len;
4895         nb_ifreq = target_ifc_len / target_ifreq_size;
4896         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4897 
4898         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4899         if (outbufsz > MAX_STRUCT_SIZE) {
4900             /*
4901              * We can't fit all the ifreq entries into the fixed size buffer.
4902              * Allocate one that is large enough and use it instead.
4903              */
4904             host_ifconf = g_try_malloc(outbufsz);
4905             if (!host_ifconf) {
4906                 return -TARGET_ENOMEM;
4907             }
4908             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4909             free_buf = 1;
4910         }
4911         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4912 
4913         host_ifconf->ifc_len = host_ifc_len;
4914     } else {
4915         host_ifc_buf = NULL;
4916     }
4917     host_ifconf->ifc_buf = host_ifc_buf;
4918 
4919     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4920     if (!is_error(ret)) {
4921 	/* convert host ifc_len to target ifc_len */
4922 
4923         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4924         target_ifc_len = nb_ifreq * target_ifreq_size;
4925         host_ifconf->ifc_len = target_ifc_len;
4926 
4927 	/* restore target ifc_buf */
4928 
4929         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4930 
4931 	/* copy struct ifconf to target user */
4932 
4933         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4934         if (!argptr)
4935             return -TARGET_EFAULT;
4936         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4937         unlock_user(argptr, arg, target_size);
4938 
4939         if (target_ifc_buf != 0) {
4940             /* copy ifreq[] to target user */
4941             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4942             for (i = 0; i < nb_ifreq ; i++) {
4943                 thunk_convert(argptr + i * target_ifreq_size,
4944                               host_ifc_buf + i * sizeof(struct ifreq),
4945                               ifreq_arg_type, THUNK_TARGET);
4946             }
4947             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4948         }
4949     }
4950 
4951     if (free_buf) {
4952         g_free(host_ifconf);
4953     }
4954 
4955     return ret;
4956 }
4957 
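/*
 * A sketch of the guest-side SIOCGIFCONF usage this handler has to cope
 * with: the caller passes a buffer of struct ifreq and the kernel rewrites
 * ifc_len to the number of bytes actually filled in, which is why the code
 * above converts ifc_len in both directions and why a NULL ifc_buf (pure
 * length query) is handled separately.
 *
 *     #include <stdio.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/socket.h>
 *     #include <net/if.h>
 *
 *     int main(void)
 *     {
 *         struct ifreq reqs[16];
 *         struct ifconf ifc;
 *         int s = socket(AF_INET, SOCK_DGRAM, 0);
 *         int i, n;
 *
 *         ifc.ifc_len = sizeof(reqs);
 *         ifc.ifc_req = reqs;
 *         if (s < 0 || ioctl(s, SIOCGIFCONF, &ifc) < 0) {
 *             return 1;
 *         }
 *         n = ifc.ifc_len / sizeof(struct ifreq);
 *         for (i = 0; i < n; i++) {
 *             printf("%s\n", reqs[i].ifr_name);
 *         }
 *         close(s);
 *         return 0;
 *     }
 */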
4958 #if defined(CONFIG_USBFS)
4959 #if HOST_LONG_BITS > 64
4960 #error USBDEVFS thunks do not support >64 bit hosts yet.
4961 #endif
4962 struct live_urb {
4963     uint64_t target_urb_adr;
4964     uint64_t target_buf_adr;
4965     char *target_buf_ptr;
4966     struct usbdevfs_urb host_urb;
4967 };
4968 
4969 static GHashTable *usbdevfs_urb_hashtable(void)
4970 {
4971     static GHashTable *urb_hashtable;
4972 
4973     if (!urb_hashtable) {
4974         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4975     }
4976     return urb_hashtable;
4977 }
4978 
4979 static void urb_hashtable_insert(struct live_urb *urb)
4980 {
4981     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4982     g_hash_table_insert(urb_hashtable, urb, urb);
4983 }
4984 
4985 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4986 {
4987     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4988     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4989 }
4990 
4991 static void urb_hashtable_remove(struct live_urb *urb)
4992 {
4993     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4994     g_hash_table_remove(urb_hashtable, urb);
4995 }
4996 
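/*
 * The table above leans on a GLib idiom worth spelling out: with
 * g_int64_hash/g_int64_equal the key is a pointer to a 64-bit value, and
 * because target_urb_adr is the *first* member of struct live_urb, a
 * pointer to the whole struct doubles as a pointer to its key.  A
 * self-contained sketch of the same pattern (needs GLib; the struct here
 * is hypothetical):
 *
 *     #include <glib.h>
 *     #include <stdio.h>
 *
 *     struct item {
 *         guint64 key;         // must remain the first member
 *         const char *name;
 *     };
 *
 *     int main(void)
 *     {
 *         GHashTable *ht = g_hash_table_new(g_int64_hash, g_int64_equal);
 *         struct item it = { 0x1234, "demo" };
 *         guint64 wanted = 0x1234;
 *         struct item *found;
 *
 *         g_hash_table_insert(ht, &it, &it);    // struct is its own key
 *         found = g_hash_table_lookup(ht, &wanted);
 *         printf("%s\n", found ? found->name : "miss");
 *         g_hash_table_destroy(ht);
 *         return 0;
 *     }
 */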
4997 static abi_long
4998 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4999                           int fd, int cmd, abi_long arg)
5000 {
5001     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
5002     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
5003     struct live_urb *lurb;
5004     void *argptr;
5005     uint64_t hurb;
5006     int target_size;
5007     uintptr_t target_urb_adr;
5008     abi_long ret;
5009 
5010     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
5011 
5012     memset(buf_temp, 0, sizeof(uint64_t));
5013     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5014     if (is_error(ret)) {
5015         return ret;
5016     }
5017 
5018     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5019     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5020     if (!lurb->target_urb_adr) {
5021         return -TARGET_EFAULT;
5022     }
5023     urb_hashtable_remove(lurb);
5024     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5025         lurb->host_urb.buffer_length);
5026     lurb->target_buf_ptr = NULL;
5027 
5028     /* restore the guest buffer pointer */
5029     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5030 
5031     /* update the guest urb struct */
5032     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5033     if (!argptr) {
5034         g_free(lurb);
5035         return -TARGET_EFAULT;
5036     }
5037     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5038     unlock_user(argptr, lurb->target_urb_adr, target_size);
5039 
5040     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5041     /* write back the urb handle */
5042     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5043     if (!argptr) {
5044         g_free(lurb);
5045         return -TARGET_EFAULT;
5046     }
5047 
5048     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5049     target_urb_adr = lurb->target_urb_adr;
5050     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5051     unlock_user(argptr, arg, target_size);
5052 
5053     g_free(lurb);
5054     return ret;
5055 }
5056 
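/*
 * The pointer arithmetic at the top of do_ioctl_usbdevfs_reapurb() is the
 * classic container_of pattern: the kernel hands back the address of the
 * embedded host_urb, and subtracting offsetof(struct live_urb, host_urb)
 * recovers the wrapper carrying the target-side metadata.  A minimal
 * standalone illustration with a made-up struct:
 *
 *     #include <stdio.h>
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     struct wrapper {
 *         int metadata;
 *         struct { int payload; } inner;   // stand-in for host_urb
 *     };
 *
 *     int main(void)
 *     {
 *         struct wrapper w = { 42, { 7 } };
 *         void *inner_ptr = &w.inner;      // what the kernel reports back
 *         struct wrapper *back = (void *)((uintptr_t)inner_ptr -
 *                                         offsetof(struct wrapper, inner));
 *
 *         printf("%d\n", back->metadata);  // prints 42
 *         return 0;
 *     }
 */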
5057 static abi_long
5058 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5059                              uint8_t *buf_temp __attribute__((unused)),
5060                              int fd, int cmd, abi_long arg)
5061 {
5062     struct live_urb *lurb;
5063 
5064     /* map target address back to host URB with metadata. */
5065     lurb = urb_hashtable_lookup(arg);
5066     if (!lurb) {
5067         return -TARGET_EFAULT;
5068     }
5069     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5070 }
5071 
5072 static abi_long
5073 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5074                             int fd, int cmd, abi_long arg)
5075 {
5076     const argtype *arg_type = ie->arg_type;
5077     int target_size;
5078     abi_long ret;
5079     void *argptr;
5080     int rw_dir;
5081     struct live_urb *lurb;
5082 
5083     /*
5084      * Each submitted URB needs to map to a unique ID for the
5085      * kernel, and that unique ID needs to be a pointer to
5086      * host memory.  Hence, we need to malloc for each URB.
5087      * Isochronous transfers have a variable-length struct.
5088      */
5089     arg_type++;
5090     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5091 
5092     /* construct host copy of urb and metadata */
5093     lurb = g_try_new0(struct live_urb, 1);
5094     if (!lurb) {
5095         return -TARGET_ENOMEM;
5096     }
5097 
5098     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5099     if (!argptr) {
5100         g_free(lurb);
5101         return -TARGET_EFAULT;
5102     }
5103     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5104     unlock_user(argptr, arg, 0);
5105 
5106     lurb->target_urb_adr = arg;
5107     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5108 
5109     /* buffer space used depends on endpoint type so lock the entire buffer */
5110     /* control type urbs should check the buffer contents for true direction */
5111     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5112     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5113         lurb->host_urb.buffer_length, 1);
5114     if (lurb->target_buf_ptr == NULL) {
5115         g_free(lurb);
5116         return -TARGET_EFAULT;
5117     }
5118 
5119     /* update buffer pointer in host copy */
5120     lurb->host_urb.buffer = lurb->target_buf_ptr;
5121 
5122     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5123     if (is_error(ret)) {
5124         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5125         g_free(lurb);
5126     } else {
5127         urb_hashtable_insert(lurb);
5128     }
5129 
5130     return ret;
5131 }
5132 #endif /* CONFIG_USBFS */
5133 
5134 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5135                             int cmd, abi_long arg)
5136 {
5137     void *argptr;
5138     struct dm_ioctl *host_dm;
5139     abi_long guest_data;
5140     uint32_t guest_data_size;
5141     int target_size;
5142     const argtype *arg_type = ie->arg_type;
5143     abi_long ret;
5144     void *big_buf = NULL;
5145     char *host_data;
5146 
5147     arg_type++;
5148     target_size = thunk_type_size(arg_type, 0);
5149     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5150     if (!argptr) {
5151         ret = -TARGET_EFAULT;
5152         goto out;
5153     }
5154     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5155     unlock_user(argptr, arg, 0);
5156 
5157     /* buf_temp is too small, so fetch things into a bigger buffer */
5158     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5159     memcpy(big_buf, buf_temp, target_size);
5160     buf_temp = big_buf;
5161     host_dm = big_buf;
5162 
5163     guest_data = arg + host_dm->data_start;
5164     if ((guest_data - arg) < 0) {
5165         ret = -TARGET_EINVAL;
5166         goto out;
5167     }
5168     guest_data_size = host_dm->data_size - host_dm->data_start;
5169     host_data = (char*)host_dm + host_dm->data_start;
5170 
5171     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5172     if (!argptr) {
5173         ret = -TARGET_EFAULT;
5174         goto out;
5175     }
5176 
5177     switch (ie->host_cmd) {
5178     case DM_REMOVE_ALL:
5179     case DM_LIST_DEVICES:
5180     case DM_DEV_CREATE:
5181     case DM_DEV_REMOVE:
5182     case DM_DEV_SUSPEND:
5183     case DM_DEV_STATUS:
5184     case DM_DEV_WAIT:
5185     case DM_TABLE_STATUS:
5186     case DM_TABLE_CLEAR:
5187     case DM_TABLE_DEPS:
5188     case DM_LIST_VERSIONS:
5189         /* no input data */
5190         break;
5191     case DM_DEV_RENAME:
5192     case DM_DEV_SET_GEOMETRY:
5193         /* data contains only strings */
5194         memcpy(host_data, argptr, guest_data_size);
5195         break;
5196     case DM_TARGET_MSG:
5197         memcpy(host_data, argptr, guest_data_size);
5198         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5199         break;
5200     case DM_TABLE_LOAD:
5201     {
5202         void *gspec = argptr;
5203         void *cur_data = host_data;
5204         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5205         int spec_size = thunk_type_size(arg_type, 0);
5206         int i;
5207 
5208         for (i = 0; i < host_dm->target_count; i++) {
5209             struct dm_target_spec *spec = cur_data;
5210             uint32_t next;
5211             int slen;
5212 
5213             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5214             slen = strlen((char*)gspec + spec_size) + 1;
5215             next = spec->next;
5216             spec->next = sizeof(*spec) + slen;
5217             strcpy((char*)&spec[1], gspec + spec_size);
5218             gspec += next;
5219             cur_data += spec->next;
5220         }
5221         break;
5222     }
5223     default:
5224         ret = -TARGET_EINVAL;
5225         unlock_user(argptr, guest_data, 0);
5226         goto out;
5227     }
5228     unlock_user(argptr, guest_data, 0);
5229 
5230     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5231     if (!is_error(ret)) {
5232         guest_data = arg + host_dm->data_start;
5233         guest_data_size = host_dm->data_size - host_dm->data_start;
5234         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5235         switch (ie->host_cmd) {
5236         case DM_REMOVE_ALL:
5237         case DM_DEV_CREATE:
5238         case DM_DEV_REMOVE:
5239         case DM_DEV_RENAME:
5240         case DM_DEV_SUSPEND:
5241         case DM_DEV_STATUS:
5242         case DM_TABLE_LOAD:
5243         case DM_TABLE_CLEAR:
5244         case DM_TARGET_MSG:
5245         case DM_DEV_SET_GEOMETRY:
5246             /* no return data */
5247             break;
5248         case DM_LIST_DEVICES:
5249         {
5250             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5251             uint32_t remaining_data = guest_data_size;
5252             void *cur_data = argptr;
5253             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5254             int nl_size = 12; /* can't use thunk_size due to alignment */
5255 
5256             while (1) {
5257                 uint32_t next = nl->next;
5258                 if (next) {
5259                     nl->next = nl_size + (strlen(nl->name) + 1);
5260                 }
5261                 if (remaining_data < nl->next) {
5262                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5263                     break;
5264                 }
5265                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5266                 strcpy(cur_data + nl_size, nl->name);
5267                 cur_data += nl->next;
5268                 remaining_data -= nl->next;
5269                 if (!next) {
5270                     break;
5271                 }
5272                 nl = (void*)nl + next;
5273             }
5274             break;
5275         }
5276         case DM_DEV_WAIT:
5277         case DM_TABLE_STATUS:
5278         {
5279             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5280             void *cur_data = argptr;
5281             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5282             int spec_size = thunk_type_size(arg_type, 0);
5283             int i;
5284 
5285             for (i = 0; i < host_dm->target_count; i++) {
5286                 uint32_t next = spec->next;
5287                 int slen = strlen((char*)&spec[1]) + 1;
5288                 spec->next = (cur_data - argptr) + spec_size + slen;
5289                 if (guest_data_size < spec->next) {
5290                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5291                     break;
5292                 }
5293                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5294                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5295                 cur_data = argptr + spec->next;
5296                 spec = (void*)host_dm + host_dm->data_start + next;
5297             }
5298             break;
5299         }
5300         case DM_TABLE_DEPS:
5301         {
5302             void *hdata = (void*)host_dm + host_dm->data_start;
5303             int count = *(uint32_t*)hdata;
5304             uint64_t *hdev = hdata + 8;
5305             uint64_t *gdev = argptr + 8;
5306             int i;
5307 
5308             *(uint32_t*)argptr = tswap32(count);
5309             for (i = 0; i < count; i++) {
5310                 *gdev = tswap64(*hdev);
5311                 gdev++;
5312                 hdev++;
5313             }
5314             break;
5315         }
5316         case DM_LIST_VERSIONS:
5317         {
5318             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5319             uint32_t remaining_data = guest_data_size;
5320             void *cur_data = argptr;
5321             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5322             int vers_size = thunk_type_size(arg_type, 0);
5323 
5324             while (1) {
5325                 uint32_t next = vers->next;
5326                 if (next) {
5327                     vers->next = vers_size + (strlen(vers->name) + 1);
5328                 }
5329                 if (remaining_data < vers->next) {
5330                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5331                     break;
5332                 }
5333                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5334                 strcpy(cur_data + vers_size, vers->name);
5335                 cur_data += vers->next;
5336                 remaining_data -= vers->next;
5337                 if (!next) {
5338                     break;
5339                 }
5340                 vers = (void*)vers + next;
5341             }
5342             break;
5343         }
5344         default:
5345             unlock_user(argptr, guest_data, 0);
5346             ret = -TARGET_EINVAL;
5347             goto out;
5348         }
5349         unlock_user(argptr, guest_data, guest_data_size);
5350 
5351         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5352         if (!argptr) {
5353             ret = -TARGET_EFAULT;
5354             goto out;
5355         }
5356         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5357         unlock_user(argptr, arg, target_size);
5358     }
5359 out:
5360     g_free(big_buf);
5361     return ret;
5362 }
5363 
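/*
 * All of the device-mapper commands handled above share one framing
 * convention that the conversions rely on: a fixed header whose data_start
 * field is the offset of a variable-length payload from the start of the
 * buffer, and whose data_size field is the total buffer size.  A toy
 * version of that framing (hypothetical header, not the real
 * struct dm_ioctl):
 *
 *     #include <stdio.h>
 *     #include <string.h>
 *     #include <stdint.h>
 *
 *     struct toy_hdr {
 *         uint32_t data_size;    // total size: header + payload
 *         uint32_t data_start;   // offset of payload from start of header
 *     };
 *
 *     int main(void)
 *     {
 *         char buf[64];
 *         struct toy_hdr *hdr = (struct toy_hdr *)buf;
 *
 *         hdr->data_start = sizeof(*hdr);
 *         hdr->data_size = sizeof(buf);
 *         strcpy(buf + hdr->data_start, "payload");
 *
 *         printf("payload room: %u bytes\n",
 *                (unsigned)(hdr->data_size - hdr->data_start));
 *         printf("payload: %s\n", (char *)hdr + hdr->data_start);
 *         return 0;
 *     }
 */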
5364 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5365                                int cmd, abi_long arg)
5366 {
5367     void *argptr;
5368     int target_size;
5369     const argtype *arg_type = ie->arg_type;
5370     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5371     abi_long ret;
5372 
5373     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5374     struct blkpg_partition host_part;
5375 
5376     /* Read and convert blkpg */
5377     arg_type++;
5378     target_size = thunk_type_size(arg_type, 0);
5379     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5380     if (!argptr) {
5381         ret = -TARGET_EFAULT;
5382         goto out;
5383     }
5384     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5385     unlock_user(argptr, arg, 0);
5386 
5387     switch (host_blkpg->op) {
5388     case BLKPG_ADD_PARTITION:
5389     case BLKPG_DEL_PARTITION:
5390         /* payload is struct blkpg_partition */
5391         break;
5392     default:
5393         /* Unknown opcode */
5394         ret = -TARGET_EINVAL;
5395         goto out;
5396     }
5397 
5398     /* Read and convert blkpg->data */
5399     arg = (abi_long)(uintptr_t)host_blkpg->data;
5400     target_size = thunk_type_size(part_arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         ret = -TARGET_EFAULT;
5404         goto out;
5405     }
5406     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5407     unlock_user(argptr, arg, 0);
5408 
5409     /* Swizzle the data pointer to our local copy and call! */
5410     host_blkpg->data = &host_part;
5411     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5412 
5413 out:
5414     return ret;
5415 }
5416 
5417 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5418                                 int fd, int cmd, abi_long arg)
5419 {
5420     const argtype *arg_type = ie->arg_type;
5421     const StructEntry *se;
5422     const argtype *field_types;
5423     const int *dst_offsets, *src_offsets;
5424     int target_size;
5425     void *argptr;
5426     abi_ulong *target_rt_dev_ptr = NULL;
5427     unsigned long *host_rt_dev_ptr = NULL;
5428     abi_long ret;
5429     int i;
5430 
5431     assert(ie->access == IOC_W);
5432     assert(*arg_type == TYPE_PTR);
5433     arg_type++;
5434     assert(*arg_type == TYPE_STRUCT);
5435     target_size = thunk_type_size(arg_type, 0);
5436     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5437     if (!argptr) {
5438         return -TARGET_EFAULT;
5439     }
5440     arg_type++;
5441     assert(*arg_type == (int)STRUCT_rtentry);
5442     se = struct_entries + *arg_type++;
5443     assert(se->convert[0] == NULL);
5444     /* convert struct here to be able to catch rt_dev string */
5445     field_types = se->field_types;
5446     dst_offsets = se->field_offsets[THUNK_HOST];
5447     src_offsets = se->field_offsets[THUNK_TARGET];
5448     for (i = 0; i < se->nb_fields; i++) {
5449         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5450             assert(*field_types == TYPE_PTRVOID);
5451             target_rt_dev_ptr = argptr + src_offsets[i];
5452             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5453             if (*target_rt_dev_ptr != 0) {
5454                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5455                                                   tswapal(*target_rt_dev_ptr));
5456                 if (!*host_rt_dev_ptr) {
5457                     unlock_user(argptr, arg, 0);
5458                     return -TARGET_EFAULT;
5459                 }
5460             } else {
5461                 *host_rt_dev_ptr = 0;
5462             }
5463             field_types++;
5464             continue;
5465         }
5466         field_types = thunk_convert(buf_temp + dst_offsets[i],
5467                                     argptr + src_offsets[i],
5468                                     field_types, THUNK_HOST);
5469     }
5470     unlock_user(argptr, arg, 0);
5471 
5472     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5473 
5474     assert(host_rt_dev_ptr != NULL);
5475     assert(target_rt_dev_ptr != NULL);
5476     if (*host_rt_dev_ptr != 0) {
5477         unlock_user((void *)*host_rt_dev_ptr,
5478                     *target_rt_dev_ptr, 0);
5479     }
5480     return ret;
5481 }
5482 
5483 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5484                                      int fd, int cmd, abi_long arg)
5485 {
5486     int sig = target_to_host_signal(arg);
5487     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5488 }
5489 
5490 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5491                                     int fd, int cmd, abi_long arg)
5492 {
5493     struct timeval tv;
5494     abi_long ret;
5495 
5496     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5497     if (is_error(ret)) {
5498         return ret;
5499     }
5500 
5501     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5502         if (copy_to_user_timeval(arg, &tv)) {
5503             return -TARGET_EFAULT;
5504         }
5505     } else {
5506         if (copy_to_user_timeval64(arg, &tv)) {
5507             return -TARGET_EFAULT;
5508         }
5509     }
5510 
5511     return ret;
5512 }
5513 
5514 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5515                                       int fd, int cmd, abi_long arg)
5516 {
5517     struct timespec ts;
5518     abi_long ret;
5519 
5520     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5521     if (is_error(ret)) {
5522         return ret;
5523     }
5524 
5525     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5526         if (host_to_target_timespec(arg, &ts)) {
5527             return -TARGET_EFAULT;
5528         }
5529     } else {
5530         if (host_to_target_timespec64(arg, &ts)) {
5531             return -TARGET_EFAULT;
5532         }
5533     }
5534 
5535     return ret;
5536 }
5537 
5538 #ifdef TIOCGPTPEER
5539 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5540                                      int fd, int cmd, abi_long arg)
5541 {
5542     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5543     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5544 }
5545 #endif
5546 
5547 #ifdef HAVE_DRM_H
5548 
5549 static void unlock_drm_version(struct drm_version *host_ver,
5550                                struct target_drm_version *target_ver,
5551                                bool copy)
5552 {
5553     unlock_user(host_ver->name, target_ver->name,
5554                                 copy ? host_ver->name_len : 0);
5555     unlock_user(host_ver->date, target_ver->date,
5556                                 copy ? host_ver->date_len : 0);
5557     unlock_user(host_ver->desc, target_ver->desc,
5558                                 copy ? host_ver->desc_len : 0);
5559 }
5560 
5561 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5562                                           struct target_drm_version *target_ver)
5563 {
5564     memset(host_ver, 0, sizeof(*host_ver));
5565 
5566     __get_user(host_ver->name_len, &target_ver->name_len);
5567     if (host_ver->name_len) {
5568         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5569                                    target_ver->name_len, 0);
5570         if (!host_ver->name) {
5571             return -EFAULT;
5572         }
5573     }
5574 
5575     __get_user(host_ver->date_len, &target_ver->date_len);
5576     if (host_ver->date_len) {
5577         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5578                                    target_ver->date_len, 0);
5579         if (!host_ver->date) {
5580             goto err;
5581         }
5582     }
5583 
5584     __get_user(host_ver->desc_len, &target_ver->desc_len);
5585     if (host_ver->desc_len) {
5586         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5587                                    target_ver->desc_len, 0);
5588         if (!host_ver->desc) {
5589             goto err;
5590         }
5591     }
5592 
5593     return 0;
5594 err:
5595     unlock_drm_version(host_ver, target_ver, false);
5596     return -EFAULT;
5597 }
5598 
5599 static inline void host_to_target_drmversion(
5600                                           struct target_drm_version *target_ver,
5601                                           struct drm_version *host_ver)
5602 {
5603     __put_user(host_ver->version_major, &target_ver->version_major);
5604     __put_user(host_ver->version_minor, &target_ver->version_minor);
5605     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5606     __put_user(host_ver->name_len, &target_ver->name_len);
5607     __put_user(host_ver->date_len, &target_ver->date_len);
5608     __put_user(host_ver->desc_len, &target_ver->desc_len);
5609     unlock_drm_version(host_ver, target_ver, true);
5610 }
5611 
5612 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5613                              int fd, int cmd, abi_long arg)
5614 {
5615     struct drm_version *ver;
5616     struct target_drm_version *target_ver;
5617     abi_long ret;
5618 
5619     switch (ie->host_cmd) {
5620     case DRM_IOCTL_VERSION:
5621         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5622             return -TARGET_EFAULT;
5623         }
5624         ver = (struct drm_version *)buf_temp;
5625         ret = target_to_host_drmversion(ver, target_ver);
5626         if (!is_error(ret)) {
5627             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5628             if (is_error(ret)) {
5629                 unlock_drm_version(ver, target_ver, false);
5630             } else {
5631                 host_to_target_drmversion(target_ver, ver);
5632             }
5633         }
5634         unlock_user_struct(target_ver, arg, 0);
5635         return ret;
5636     }
5637     return -TARGET_ENOSYS;
5638 }
5639 
5640 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5641                                            struct drm_i915_getparam *gparam,
5642                                            int fd, abi_long arg)
5643 {
5644     abi_long ret;
5645     int value;
5646     struct target_drm_i915_getparam *target_gparam;
5647 
5648     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5649         return -TARGET_EFAULT;
5650     }
5651 
5652     __get_user(gparam->param, &target_gparam->param);
5653     gparam->value = &value;
5654     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5655     put_user_s32(value, target_gparam->value);
5656 
5657     unlock_user_struct(target_gparam, arg, 0);
5658     return ret;
5659 }
5660 
5661 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5662                                   int fd, int cmd, abi_long arg)
5663 {
5664     switch (ie->host_cmd) {
5665     case DRM_IOCTL_I915_GETPARAM:
5666         return do_ioctl_drm_i915_getparam(ie,
5667                                           (struct drm_i915_getparam *)buf_temp,
5668                                           fd, arg);
5669     default:
5670         return -TARGET_ENOSYS;
5671     }
5672 }
5673 
5674 #endif
5675 
5676 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5677                                         int fd, int cmd, abi_long arg)
5678 {
5679     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5680     struct tun_filter *target_filter;
5681     char *target_addr;
5682 
5683     assert(ie->access == IOC_W);
5684 
5685     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5686     if (!target_filter) {
5687         return -TARGET_EFAULT;
5688     }
5689     filter->flags = tswap16(target_filter->flags);
5690     filter->count = tswap16(target_filter->count);
5691     unlock_user(target_filter, arg, 0);
5692 
5693     if (filter->count) {
5694         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5695             MAX_STRUCT_SIZE) {
5696             return -TARGET_EFAULT;
5697         }
5698 
5699         target_addr = lock_user(VERIFY_READ,
5700                                 arg + offsetof(struct tun_filter, addr),
5701                                 filter->count * ETH_ALEN, 1);
5702         if (!target_addr) {
5703             return -TARGET_EFAULT;
5704         }
5705         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5706         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5707     }
5708 
5709     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5710 }
5711 
5712 IOCTLEntry ioctl_entries[] = {
5713 #define IOCTL(cmd, access, ...) \
5714     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5715 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5716     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5717 #define IOCTL_IGNORE(cmd) \
5718     { TARGET_ ## cmd, 0, #cmd },
5719 #include "ioctls.h"
5720     { 0, 0, },
5721 };
5722 
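/*
 * For readers tracing a particular command: each IOCTL() line pulled in
 * from "ioctls.h" expands to one table entry pairing the target command
 * value with the host one.  For instance, a hypothetical entry of the form
 *
 *     IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT))
 *
 * would expand to
 *
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0,
 *       { MK_PTR(TYPE_INT) } },
 *
 * while IOCTL_SPECIAL() additionally fills in the do_ioctl callback and
 * IOCTL_IGNORE() leaves host_cmd zero, which makes do_ioctl() below return
 * -TARGET_ENOSYS for that command.
 */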
5723 /* ??? Implement proper locking for ioctls.  */
5724 /* do_ioctl() must return target values and target errnos. */
5725 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5726 {
5727     const IOCTLEntry *ie;
5728     const argtype *arg_type;
5729     abi_long ret;
5730     uint8_t buf_temp[MAX_STRUCT_SIZE];
5731     int target_size;
5732     void *argptr;
5733 
5734     ie = ioctl_entries;
5735     for (;;) {
5736         if (ie->target_cmd == 0) {
5737             qemu_log_mask(
5738                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5739             return -TARGET_ENOSYS;
5740         }
5741         if (ie->target_cmd == cmd)
5742             break;
5743         ie++;
5744     }
5745     arg_type = ie->arg_type;
5746     if (ie->do_ioctl) {
5747         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5748     } else if (!ie->host_cmd) {
5749         /* Some architectures define BSD ioctls in their headers
5750            that are not implemented in Linux.  */
5751         return -TARGET_ENOSYS;
5752     }
5753 
5754     switch (arg_type[0]) {
5755     case TYPE_NULL:
5756         /* no argument */
5757         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5758         break;
5759     case TYPE_PTRVOID:
5760     case TYPE_INT:
5761     case TYPE_LONG:
5762     case TYPE_ULONG:
5763         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5764         break;
5765     case TYPE_PTR:
5766         arg_type++;
5767         target_size = thunk_type_size(arg_type, 0);
5768         switch (ie->access) {
5769         case IOC_R:
5770             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771             if (!is_error(ret)) {
5772                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5773                 if (!argptr)
5774                     return -TARGET_EFAULT;
5775                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5776                 unlock_user(argptr, arg, target_size);
5777             }
5778             break;
5779         case IOC_W:
5780             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5781             if (!argptr)
5782                 return -TARGET_EFAULT;
5783             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5784             unlock_user(argptr, arg, 0);
5785             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5786             break;
5787         default:
5788         case IOC_RW:
5789             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5790             if (!argptr)
5791                 return -TARGET_EFAULT;
5792             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5793             unlock_user(argptr, arg, 0);
5794             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5795             if (!is_error(ret)) {
5796                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5797                 if (!argptr)
5798                     return -TARGET_EFAULT;
5799                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5800                 unlock_user(argptr, arg, target_size);
5801             }
5802             break;
5803         }
5804         break;
5805     default:
5806         qemu_log_mask(LOG_UNIMP,
5807                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5808                       (long)cmd, arg_type[0]);
5809         ret = -TARGET_ENOSYS;
5810         break;
5811     }
5812     return ret;
5813 }
5814 
5815 static const bitmask_transtbl iflag_tbl[] = {
5816         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5817         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5818         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5819         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5820         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5821         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5822         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5823         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5824         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5825         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5826         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5827         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5828         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5829         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5830         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5831         { 0, 0, 0, 0 }
5832 };
5833 
5834 static const bitmask_transtbl oflag_tbl[] = {
5835 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5836 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5837 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5838 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5839 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5840 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5841 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5842 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5843 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5844 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5845 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5846 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5847 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5848 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5849 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5850 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5851 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5852 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5853 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5854 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5855 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5856 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5857 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5858 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5859 	{ 0, 0, 0, 0 }
5860 };
5861 
5862 static const bitmask_transtbl cflag_tbl[] = {
5863 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5864 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5865 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5866 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5867 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5868 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5869 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5870 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5871 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5872 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5873 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5874 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5875 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5876 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5877 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5878 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5879 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5880 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5881 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5882 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5883 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5884 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5885 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5886 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5887 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5888 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5889 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5890 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5891 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5892 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5893 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5894 	{ 0, 0, 0, 0 }
5895 };
5896 
5897 static const bitmask_transtbl lflag_tbl[] = {
5898   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5899   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5900   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5901   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5902   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5903   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5904   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5905   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5906   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5907   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5908   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5909   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5910   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5911   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5912   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5913   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5914   { 0, 0, 0, 0 }
5915 };
5916 
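/*
 * How these { target_mask, target_bits, host_mask, host_bits } tables are
 * consumed (a simplified sketch of the target_to_host_bitmask() helper used
 * by the termios converters below): every entry whose masked target bits
 * match contributes its host bits, which is what lets multi-bit fields such
 * as CBAUD and CSIZE be remapped entry by entry.
 *
 *     struct bitmask_transtbl {
 *         unsigned int target_mask, target_bits;
 *         unsigned int host_mask, host_bits;
 *     };
 *
 *     static unsigned int translate(unsigned int target_flags,
 *                                   const struct bitmask_transtbl *tbl)
 *     {
 *         unsigned int host_flags = 0;
 *
 *         for (; tbl->target_mask && tbl->host_mask; tbl++) {
 *             if ((target_flags & tbl->target_mask) == tbl->target_bits) {
 *                 host_flags |= tbl->host_bits;
 *             }
 *         }
 *         return host_flags;
 *     }
 */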
5917 static void target_to_host_termios (void *dst, const void *src)
5918 {
5919     struct host_termios *host = dst;
5920     const struct target_termios *target = src;
5921 
5922     host->c_iflag =
5923         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5924     host->c_oflag =
5925         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5926     host->c_cflag =
5927         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5928     host->c_lflag =
5929         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5930     host->c_line = target->c_line;
5931 
5932     memset(host->c_cc, 0, sizeof(host->c_cc));
5933     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5934     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5935     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5936     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5937     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5938     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5939     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5940     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5941     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5942     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5943     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5944     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5945     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5946     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5947     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5948     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5949     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5950 }
5951 
5952 static void host_to_target_termios (void *dst, const void *src)
5953 {
5954     struct target_termios *target = dst;
5955     const struct host_termios *host = src;
5956 
5957     target->c_iflag =
5958         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5959     target->c_oflag =
5960         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5961     target->c_cflag =
5962         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5963     target->c_lflag =
5964         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5965     target->c_line = host->c_line;
5966 
5967     memset(target->c_cc, 0, sizeof(target->c_cc));
5968     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5969     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5970     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5971     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5972     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5973     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5974     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5975     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5976     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5977     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5978     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5979     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5980     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5981     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5982     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5983     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5984     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5985 }
5986 
5987 static const StructEntry struct_termios_def = {
5988     .convert = { host_to_target_termios, target_to_host_termios },
5989     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5990     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5991     .print = print_termios,
5992 };
5993 
5994 static const bitmask_transtbl mmap_flags_tbl[] = {
5995     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5996     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5997     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5998     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5999       MAP_ANONYMOUS, MAP_ANONYMOUS },
6000     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
6001       MAP_GROWSDOWN, MAP_GROWSDOWN },
6002     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
6003       MAP_DENYWRITE, MAP_DENYWRITE },
6004     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
6005       MAP_EXECUTABLE, MAP_EXECUTABLE },
6006     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
6007     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
6008       MAP_NORESERVE, MAP_NORESERVE },
6009     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
6010     /* MAP_STACK had been ignored by the kernel for quite some time.
6011        Recognize it for the target insofar as we do not want to pass
6012        it through to the host.  */
6013     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6014     { 0, 0, 0, 0 }
6015 };
6016 
6017 /*
6018  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6019  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
6020  */
6021 #if defined(TARGET_I386)
6022 
6023 /* NOTE: there is really only one LDT for all the threads */
6024 static uint8_t *ldt_table;
6025 
6026 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6027 {
6028     int size;
6029     void *p;
6030 
6031     if (!ldt_table)
6032         return 0;
6033     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6034     if (size > bytecount)
6035         size = bytecount;
6036     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6037     if (!p)
6038         return -TARGET_EFAULT;
6039     /* ??? Should this be byteswapped?  */
6040     memcpy(p, ldt_table, size);
6041     unlock_user(p, ptr, size);
6042     return size;
6043 }
6044 
6045 /* XXX: add locking support */
6046 static abi_long write_ldt(CPUX86State *env,
6047                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6048 {
6049     struct target_modify_ldt_ldt_s ldt_info;
6050     struct target_modify_ldt_ldt_s *target_ldt_info;
6051     int seg_32bit, contents, read_exec_only, limit_in_pages;
6052     int seg_not_present, useable, lm;
6053     uint32_t *lp, entry_1, entry_2;
6054 
6055     if (bytecount != sizeof(ldt_info))
6056         return -TARGET_EINVAL;
6057     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6058         return -TARGET_EFAULT;
6059     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6060     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6061     ldt_info.limit = tswap32(target_ldt_info->limit);
6062     ldt_info.flags = tswap32(target_ldt_info->flags);
6063     unlock_user_struct(target_ldt_info, ptr, 0);
6064 
6065     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6066         return -TARGET_EINVAL;
6067     seg_32bit = ldt_info.flags & 1;
6068     contents = (ldt_info.flags >> 1) & 3;
6069     read_exec_only = (ldt_info.flags >> 3) & 1;
6070     limit_in_pages = (ldt_info.flags >> 4) & 1;
6071     seg_not_present = (ldt_info.flags >> 5) & 1;
6072     useable = (ldt_info.flags >> 6) & 1;
6073 #ifdef TARGET_ABI32
6074     lm = 0;
6075 #else
6076     lm = (ldt_info.flags >> 7) & 1;
6077 #endif
6078     if (contents == 3) {
6079         if (oldmode)
6080             return -TARGET_EINVAL;
6081         if (seg_not_present == 0)
6082             return -TARGET_EINVAL;
6083     }
6084     /* allocate the LDT */
6085     if (!ldt_table) {
6086         env->ldt.base = target_mmap(0,
6087                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6088                                     PROT_READ|PROT_WRITE,
6089                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6090         if (env->ldt.base == -1)
6091             return -TARGET_ENOMEM;
6092         memset(g2h_untagged(env->ldt.base), 0,
6093                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6094         env->ldt.limit = 0xffff;
6095         ldt_table = g2h_untagged(env->ldt.base);
6096     }
6097 
6098     /* NOTE: same code as Linux kernel */
6099     /* Allow LDTs to be cleared by the user. */
6100     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6101         if (oldmode ||
6102             (contents == 0		&&
6103              read_exec_only == 1	&&
6104              seg_32bit == 0		&&
6105              limit_in_pages == 0	&&
6106              seg_not_present == 1	&&
6107              useable == 0 )) {
6108             entry_1 = 0;
6109             entry_2 = 0;
6110             goto install;
6111         }
6112     }
6113 
6114     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6115         (ldt_info.limit & 0x0ffff);
6116     entry_2 = (ldt_info.base_addr & 0xff000000) |
6117         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6118         (ldt_info.limit & 0xf0000) |
6119         ((read_exec_only ^ 1) << 9) |
6120         (contents << 10) |
6121         ((seg_not_present ^ 1) << 15) |
6122         (seg_32bit << 22) |
6123         (limit_in_pages << 23) |
6124         (lm << 21) |
6125         0x7000;
6126     if (!oldmode)
6127         entry_2 |= (useable << 20);
6128 
6129     /* Install the new entry ...  */
6130 install:
6131     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6132     lp[0] = tswap32(entry_1);
6133     lp[1] = tswap32(entry_2);
6134     return 0;
6135 }
6136 
6137 /* specific and weird i386 syscalls */
6138 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6139                               unsigned long bytecount)
6140 {
6141     abi_long ret;
6142 
6143     switch (func) {
6144     case 0:
6145         ret = read_ldt(ptr, bytecount);
6146         break;
6147     case 1:
6148         ret = write_ldt(env, ptr, bytecount, 1);
6149         break;
6150     case 0x11:
6151         ret = write_ldt(env, ptr, bytecount, 0);
6152         break;
6153     default:
6154         ret = -TARGET_ENOSYS;
6155         break;
6156     }
6157     return ret;
6158 }
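/*
 * Illustrative guest-side usage (hypothetical snippet, not part of QEMU;
 * tls_block/buf are made-up names).  It maps onto the func values handled
 * above: 0 reads the LDT, 1 writes in the legacy format, 0x11 writes in the
 * current format.
 *
 *   struct user_desc d = {
 *       .entry_number = 0,
 *       .base_addr = (unsigned long)tls_block,
 *       .limit = 0xfffff,
 *       .seg_32bit = 1,
 *       .limit_in_pages = 1,
 *   };
 *   syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 *   syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
 */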
6159 
6160 #if defined(TARGET_ABI32)
6161 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6162 {
6163     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6164     struct target_modify_ldt_ldt_s ldt_info;
6165     struct target_modify_ldt_ldt_s *target_ldt_info;
6166     int seg_32bit, contents, read_exec_only, limit_in_pages;
6167     int seg_not_present, useable, lm;
6168     uint32_t *lp, entry_1, entry_2;
6169     int i;
6170 
6171     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6172     if (!target_ldt_info)
6173         return -TARGET_EFAULT;
6174     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6175     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6176     ldt_info.limit = tswap32(target_ldt_info->limit);
6177     ldt_info.flags = tswap32(target_ldt_info->flags);
6178     if (ldt_info.entry_number == -1) {
6179         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6180             if (gdt_table[i] == 0) {
6181                 ldt_info.entry_number = i;
6182                 target_ldt_info->entry_number = tswap32(i);
6183                 break;
6184             }
6185         }
6186     }
6187     unlock_user_struct(target_ldt_info, ptr, 1);
6188 
6189     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6190         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6191         return -TARGET_EINVAL;
6192     seg_32bit = ldt_info.flags & 1;
6193     contents = (ldt_info.flags >> 1) & 3;
6194     read_exec_only = (ldt_info.flags >> 3) & 1;
6195     limit_in_pages = (ldt_info.flags >> 4) & 1;
6196     seg_not_present = (ldt_info.flags >> 5) & 1;
6197     useable = (ldt_info.flags >> 6) & 1;
6198 #ifdef TARGET_ABI32
6199     lm = 0;
6200 #else
6201     lm = (ldt_info.flags >> 7) & 1;
6202 #endif
6203 
6204     if (contents == 3) {
6205         if (seg_not_present == 0)
6206             return -TARGET_EINVAL;
6207     }
6208 
6209     /* NOTE: same code as Linux kernel */
6210     /* Allow LDTs to be cleared by the user. */
6211     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6212         if ((contents == 0             &&
6213              read_exec_only == 1       &&
6214              seg_32bit == 0            &&
6215              limit_in_pages == 0       &&
6216              seg_not_present == 1      &&
6217              useable == 0 )) {
6218             entry_1 = 0;
6219             entry_2 = 0;
6220             goto install;
6221         }
6222     }
6223 
6224     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6225         (ldt_info.limit & 0x0ffff);
6226     entry_2 = (ldt_info.base_addr & 0xff000000) |
6227         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6228         (ldt_info.limit & 0xf0000) |
6229         ((read_exec_only ^ 1) << 9) |
6230         (contents << 10) |
6231         ((seg_not_present ^ 1) << 15) |
6232         (seg_32bit << 22) |
6233         (limit_in_pages << 23) |
6234         (useable << 20) |
6235         (lm << 21) |
6236         0x7000;
6237 
6238     /* Install the new entry ...  */
6239 install:
6240     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6241     lp[0] = tswap32(entry_1);
6242     lp[1] = tswap32(entry_2);
6243     return 0;
6244 }
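/*
 * Illustrative guest-side usage (hypothetical snippet; "tls" is a made-up
 * name).  A guest libc typically asks for a free GDT TLS slot by passing
 * entry_number == -1 and reads the allocated slot back, exactly as the
 * allocation loop above implements:
 *
 *   struct user_desc d = { .entry_number = -1, .base_addr = tls,
 *                          .limit = 0xfffff, .seg_32bit = 1,
 *                          .limit_in_pages = 1, .useable = 1 };
 *   syscall(SYS_set_thread_area, &d);   // d.entry_number now holds the slot
 */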
6245 
6246 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6247 {
6248     struct target_modify_ldt_ldt_s *target_ldt_info;
6249     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6250     uint32_t base_addr, limit, flags;
6251     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6252     int seg_not_present, useable, lm;
6253     uint32_t *lp, entry_1, entry_2;
6254 
6255     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6256     if (!target_ldt_info)
6257         return -TARGET_EFAULT;
6258     idx = tswap32(target_ldt_info->entry_number);
6259     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6260         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6261         unlock_user_struct(target_ldt_info, ptr, 1);
6262         return -TARGET_EINVAL;
6263     }
6264     lp = (uint32_t *)(gdt_table + idx);
6265     entry_1 = tswap32(lp[0]);
6266     entry_2 = tswap32(lp[1]);
6267 
6268     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6269     contents = (entry_2 >> 10) & 3;
6270     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6271     seg_32bit = (entry_2 >> 22) & 1;
6272     limit_in_pages = (entry_2 >> 23) & 1;
6273     useable = (entry_2 >> 20) & 1;
6274 #ifdef TARGET_ABI32
6275     lm = 0;
6276 #else
6277     lm = (entry_2 >> 21) & 1;
6278 #endif
6279     flags = (seg_32bit << 0) | (contents << 1) |
6280         (read_exec_only << 3) | (limit_in_pages << 4) |
6281         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6282     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6283     base_addr = (entry_1 >> 16) |
6284         (entry_2 & 0xff000000) |
6285         ((entry_2 & 0xff) << 16);
6286     target_ldt_info->base_addr = tswapal(base_addr);
6287     target_ldt_info->limit = tswap32(limit);
6288     target_ldt_info->flags = tswap32(flags);
6289     unlock_user_struct(target_ldt_info, ptr, 1);
6290     return 0;
6291 }
6292 
6293 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6294 {
6295     return -TARGET_ENOSYS;
6296 }
6297 #else
6298 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6299 {
6300     abi_long ret = 0;
6301     abi_ulong val;
6302     int idx;
6303 
6304     switch(code) {
6305     case TARGET_ARCH_SET_GS:
6306     case TARGET_ARCH_SET_FS:
6307         if (code == TARGET_ARCH_SET_GS)
6308             idx = R_GS;
6309         else
6310             idx = R_FS;
6311         cpu_x86_load_seg(env, idx, 0);
6312         env->segs[idx].base = addr;
6313         break;
6314     case TARGET_ARCH_GET_GS:
6315     case TARGET_ARCH_GET_FS:
6316         if (code == TARGET_ARCH_GET_GS)
6317             idx = R_GS;
6318         else
6319             idx = R_FS;
6320         val = env->segs[idx].base;
6321         if (put_user(val, addr, abi_ulong))
6322             ret = -TARGET_EFAULT;
6323         break;
6324     default:
6325         ret = -TARGET_EINVAL;
6326         break;
6327     }
6328     return ret;
6329 }
6330 #endif /* defined(TARGET_ABI32) */
6331 #endif /* defined(TARGET_I386) */
6332 
6333 /*
6334  * These constants are generic.  Supply any that are missing from the host.
6335  */
6336 #ifndef PR_SET_NAME
6337 # define PR_SET_NAME    15
6338 # define PR_GET_NAME    16
6339 #endif
6340 #ifndef PR_SET_FP_MODE
6341 # define PR_SET_FP_MODE 45
6342 # define PR_GET_FP_MODE 46
6343 # define PR_FP_MODE_FR   (1 << 0)
6344 # define PR_FP_MODE_FRE  (1 << 1)
6345 #endif
6346 #ifndef PR_SVE_SET_VL
6347 # define PR_SVE_SET_VL  50
6348 # define PR_SVE_GET_VL  51
6349 # define PR_SVE_VL_LEN_MASK  0xffff
6350 # define PR_SVE_VL_INHERIT   (1 << 17)
6351 #endif
6352 #ifndef PR_PAC_RESET_KEYS
6353 # define PR_PAC_RESET_KEYS  54
6354 # define PR_PAC_APIAKEY   (1 << 0)
6355 # define PR_PAC_APIBKEY   (1 << 1)
6356 # define PR_PAC_APDAKEY   (1 << 2)
6357 # define PR_PAC_APDBKEY   (1 << 3)
6358 # define PR_PAC_APGAKEY   (1 << 4)
6359 #endif
6360 #ifndef PR_SET_TAGGED_ADDR_CTRL
6361 # define PR_SET_TAGGED_ADDR_CTRL 55
6362 # define PR_GET_TAGGED_ADDR_CTRL 56
6363 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6364 #endif
6365 #ifndef PR_MTE_TCF_SHIFT
6366 # define PR_MTE_TCF_SHIFT       1
6367 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6368 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6369 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6370 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6371 # define PR_MTE_TAG_SHIFT       3
6372 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6373 #endif
6374 #ifndef PR_SET_IO_FLUSHER
6375 # define PR_SET_IO_FLUSHER 57
6376 # define PR_GET_IO_FLUSHER 58
6377 #endif
6378 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6379 # define PR_SET_SYSCALL_USER_DISPATCH 59
6380 #endif
6381 #ifndef PR_SME_SET_VL
6382 # define PR_SME_SET_VL  63
6383 # define PR_SME_GET_VL  64
6384 # define PR_SME_VL_LEN_MASK  0xffff
6385 # define PR_SME_VL_INHERIT   (1 << 17)
6386 #endif
6387 
6388 #include "target_prctl.h"
6389 
6390 static abi_long do_prctl_inval0(CPUArchState *env)
6391 {
6392     return -TARGET_EINVAL;
6393 }
6394 
6395 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6396 {
6397     return -TARGET_EINVAL;
6398 }
6399 
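/*
 * Each target may supply arch-specific prctl handlers in target_prctl.h.
 * Any handler it leaves undefined falls back to do_prctl_inval0 or
 * do_prctl_inval1 (chosen by argument count) and thus reports
 * -TARGET_EINVAL.
 */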
6400 #ifndef do_prctl_get_fp_mode
6401 #define do_prctl_get_fp_mode do_prctl_inval0
6402 #endif
6403 #ifndef do_prctl_set_fp_mode
6404 #define do_prctl_set_fp_mode do_prctl_inval1
6405 #endif
6406 #ifndef do_prctl_sve_get_vl
6407 #define do_prctl_sve_get_vl do_prctl_inval0
6408 #endif
6409 #ifndef do_prctl_sve_set_vl
6410 #define do_prctl_sve_set_vl do_prctl_inval1
6411 #endif
6412 #ifndef do_prctl_reset_keys
6413 #define do_prctl_reset_keys do_prctl_inval1
6414 #endif
6415 #ifndef do_prctl_set_tagged_addr_ctrl
6416 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6417 #endif
6418 #ifndef do_prctl_get_tagged_addr_ctrl
6419 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6420 #endif
6421 #ifndef do_prctl_get_unalign
6422 #define do_prctl_get_unalign do_prctl_inval1
6423 #endif
6424 #ifndef do_prctl_set_unalign
6425 #define do_prctl_set_unalign do_prctl_inval1
6426 #endif
6427 #ifndef do_prctl_sme_get_vl
6428 #define do_prctl_sme_get_vl do_prctl_inval0
6429 #endif
6430 #ifndef do_prctl_sme_set_vl
6431 #define do_prctl_sme_set_vl do_prctl_inval1
6432 #endif
6433 
6434 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6435                          abi_long arg3, abi_long arg4, abi_long arg5)
6436 {
6437     abi_long ret;
6438 
6439     switch (option) {
6440     case PR_GET_PDEATHSIG:
6441         {
6442             int deathsig;
6443             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6444                                   arg3, arg4, arg5));
6445             if (!is_error(ret) &&
6446                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6447                 return -TARGET_EFAULT;
6448             }
6449             return ret;
6450         }
6451     case PR_SET_PDEATHSIG:
6452         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6453                                arg3, arg4, arg5));
6454     case PR_GET_NAME:
6455         {
6456             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6457             if (!name) {
6458                 return -TARGET_EFAULT;
6459             }
6460             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6461                                   arg3, arg4, arg5));
6462             unlock_user(name, arg2, 16);
6463             return ret;
6464         }
6465     case PR_SET_NAME:
6466         {
6467             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6468             if (!name) {
6469                 return -TARGET_EFAULT;
6470             }
6471             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6472                                   arg3, arg4, arg5));
6473             unlock_user(name, arg2, 0);
6474             return ret;
6475         }
6476     case PR_GET_FP_MODE:
6477         return do_prctl_get_fp_mode(env);
6478     case PR_SET_FP_MODE:
6479         return do_prctl_set_fp_mode(env, arg2);
6480     case PR_SVE_GET_VL:
6481         return do_prctl_sve_get_vl(env);
6482     case PR_SVE_SET_VL:
6483         return do_prctl_sve_set_vl(env, arg2);
6484     case PR_SME_GET_VL:
6485         return do_prctl_sme_get_vl(env);
6486     case PR_SME_SET_VL:
6487         return do_prctl_sme_set_vl(env, arg2);
6488     case PR_PAC_RESET_KEYS:
6489         if (arg3 || arg4 || arg5) {
6490             return -TARGET_EINVAL;
6491         }
6492         return do_prctl_reset_keys(env, arg2);
6493     case PR_SET_TAGGED_ADDR_CTRL:
6494         if (arg3 || arg4 || arg5) {
6495             return -TARGET_EINVAL;
6496         }
6497         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6498     case PR_GET_TAGGED_ADDR_CTRL:
6499         if (arg2 || arg3 || arg4 || arg5) {
6500             return -TARGET_EINVAL;
6501         }
6502         return do_prctl_get_tagged_addr_ctrl(env);
6503 
6504     case PR_GET_UNALIGN:
6505         return do_prctl_get_unalign(env, arg2);
6506     case PR_SET_UNALIGN:
6507         return do_prctl_set_unalign(env, arg2);
6508 
6509     case PR_CAP_AMBIENT:
6510     case PR_CAPBSET_READ:
6511     case PR_CAPBSET_DROP:
6512     case PR_GET_DUMPABLE:
6513     case PR_SET_DUMPABLE:
6514     case PR_GET_KEEPCAPS:
6515     case PR_SET_KEEPCAPS:
6516     case PR_GET_SECUREBITS:
6517     case PR_SET_SECUREBITS:
6518     case PR_GET_TIMING:
6519     case PR_SET_TIMING:
6520     case PR_GET_TIMERSLACK:
6521     case PR_SET_TIMERSLACK:
6522     case PR_MCE_KILL:
6523     case PR_MCE_KILL_GET:
6524     case PR_GET_NO_NEW_PRIVS:
6525     case PR_SET_NO_NEW_PRIVS:
6526     case PR_GET_IO_FLUSHER:
6527     case PR_SET_IO_FLUSHER:
6528         /* These prctl options take no pointer arguments; pass them straight through to the host. */
6529         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6530 
6531     case PR_GET_CHILD_SUBREAPER:
6532     case PR_SET_CHILD_SUBREAPER:
6533     case PR_GET_SPECULATION_CTRL:
6534     case PR_SET_SPECULATION_CTRL:
6535     case PR_GET_TID_ADDRESS:
6536         /* TODO */
6537         return -TARGET_EINVAL;
6538 
6539     case PR_GET_FPEXC:
6540     case PR_SET_FPEXC:
6541         /* Was used for SPE on PowerPC. */
6542         return -TARGET_EINVAL;
6543 
6544     case PR_GET_ENDIAN:
6545     case PR_SET_ENDIAN:
6546     case PR_GET_FPEMU:
6547     case PR_SET_FPEMU:
6548     case PR_SET_MM:
6549     case PR_GET_SECCOMP:
6550     case PR_SET_SECCOMP:
6551     case PR_SET_SYSCALL_USER_DISPATCH:
6552     case PR_GET_THP_DISABLE:
6553     case PR_SET_THP_DISABLE:
6554     case PR_GET_TSC:
6555     case PR_SET_TSC:
6556         /* Refuse these so the guest cannot disable host features that QEMU relies on. */
6557         return -TARGET_EINVAL;
6558 
6559     default:
6560         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6561                       option);
6562         return -TARGET_EINVAL;
6563     }
6564 }
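/*
 * Illustrative guest round trip (hypothetical snippet).  The 16-byte buffer
 * locked for PR_GET_NAME/PR_SET_NAME above matches the kernel's comm size:
 *
 *   char name[16];
 *   prctl(PR_SET_NAME, "worker-0");
 *   prctl(PR_GET_NAME, name);   // at most 15 characters plus the NUL
 */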
6565 
6566 #define NEW_STACK_SIZE 0x40000
6567 
6568 
6569 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6570 typedef struct {
6571     CPUArchState *env;
6572     pthread_mutex_t mutex;
6573     pthread_cond_t cond;
6574     pthread_t thread;
6575     uint32_t tid;
6576     abi_ulong child_tidptr;
6577     abi_ulong parent_tidptr;
6578     sigset_t sigmask;
6579 } new_thread_info;
6580 
6581 static void *clone_func(void *arg)
6582 {
6583     new_thread_info *info = arg;
6584     CPUArchState *env;
6585     CPUState *cpu;
6586     TaskState *ts;
6587 
6588     rcu_register_thread();
6589     tcg_register_thread();
6590     env = info->env;
6591     cpu = env_cpu(env);
6592     thread_cpu = cpu;
6593     ts = (TaskState *)cpu->opaque;
6594     info->tid = sys_gettid();
6595     task_settid(ts);
6596     if (info->child_tidptr)
6597         put_user_u32(info->tid, info->child_tidptr);
6598     if (info->parent_tidptr)
6599         put_user_u32(info->tid, info->parent_tidptr);
6600     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6601     /* Enable signals.  */
6602     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6603     /* Signal to the parent that we're ready.  */
6604     pthread_mutex_lock(&info->mutex);
6605     pthread_cond_broadcast(&info->cond);
6606     pthread_mutex_unlock(&info->mutex);
6607     /* Wait until the parent has finished initializing the tls state.  */
6608     pthread_mutex_lock(&clone_lock);
6609     pthread_mutex_unlock(&clone_lock);
6610     cpu_loop(env);
6611     /* never exits */
6612     return NULL;
6613 }
6614 
6615 /* do_fork() must return host values and target errnos (unlike most
6616    do_*() functions). */
6617 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6618                    abi_ulong parent_tidptr, target_ulong newtls,
6619                    abi_ulong child_tidptr)
6620 {
6621     CPUState *cpu = env_cpu(env);
6622     int ret;
6623     TaskState *ts;
6624     CPUState *new_cpu;
6625     CPUArchState *new_env;
6626     sigset_t sigmask;
6627 
6628     flags &= ~CLONE_IGNORED_FLAGS;
6629 
6630     /* Emulate vfork() with fork() */
6631     if (flags & CLONE_VFORK)
6632         flags &= ~(CLONE_VFORK | CLONE_VM);
6633 
6634     if (flags & CLONE_VM) {
6635         TaskState *parent_ts = (TaskState *)cpu->opaque;
6636         new_thread_info info;
6637         pthread_attr_t attr;
6638 
6639         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6640             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6641             return -TARGET_EINVAL;
6642         }
6643 
6644         ts = g_new0(TaskState, 1);
6645         init_task_state(ts);
6646 
6647         /* Grab a mutex so that thread setup appears atomic.  */
6648         pthread_mutex_lock(&clone_lock);
6649 
6650         /*
6651          * If this is our first additional thread, we need to ensure we
6652          * generate code for parallel execution and flush old translations.
6653          * Do this now so that the copy gets CF_PARALLEL too.
6654          */
6655         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6656             cpu->tcg_cflags |= CF_PARALLEL;
6657             tb_flush(cpu);
6658         }
6659 
6660         /* we create a new CPU instance. */
6661         new_env = cpu_copy(env);
6662         /* Init regs that differ from the parent.  */
6663         cpu_clone_regs_child(new_env, newsp, flags);
6664         cpu_clone_regs_parent(env, flags);
6665         new_cpu = env_cpu(new_env);
6666         new_cpu->opaque = ts;
6667         ts->bprm = parent_ts->bprm;
6668         ts->info = parent_ts->info;
6669         ts->signal_mask = parent_ts->signal_mask;
6670 
6671         if (flags & CLONE_CHILD_CLEARTID) {
6672             ts->child_tidptr = child_tidptr;
6673         }
6674 
6675         if (flags & CLONE_SETTLS) {
6676             cpu_set_tls (new_env, newtls);
6677         }
6678 
6679         memset(&info, 0, sizeof(info));
6680         pthread_mutex_init(&info.mutex, NULL);
6681         pthread_mutex_lock(&info.mutex);
6682         pthread_cond_init(&info.cond, NULL);
6683         info.env = new_env;
6684         if (flags & CLONE_CHILD_SETTID) {
6685             info.child_tidptr = child_tidptr;
6686         }
6687         if (flags & CLONE_PARENT_SETTID) {
6688             info.parent_tidptr = parent_tidptr;
6689         }
6690 
6691         ret = pthread_attr_init(&attr);
6692         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6693         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6694         /* It is not safe to deliver signals until the child has finished
6695            initializing, so temporarily block all signals.  */
6696         sigfillset(&sigmask);
6697         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6698         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6699 
6700         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6701         /* TODO: Free new CPU state if thread creation failed.  */
6702 
6703         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6704         pthread_attr_destroy(&attr);
6705         if (ret == 0) {
6706             /* Wait for the child to initialize.  */
6707             pthread_cond_wait(&info.cond, &info.mutex);
6708             ret = info.tid;
6709         } else {
6710             ret = -1;
6711         }
6712         pthread_mutex_unlock(&info.mutex);
6713         pthread_cond_destroy(&info.cond);
6714         pthread_mutex_destroy(&info.mutex);
6715         pthread_mutex_unlock(&clone_lock);
6716     } else {
6717         /* Without CLONE_VM we treat the request as a fork. */
6718         if (flags & CLONE_INVALID_FORK_FLAGS) {
6719             return -TARGET_EINVAL;
6720         }
6721 
6722         /* We can't support custom termination signals */
6723         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6724             return -TARGET_EINVAL;
6725         }
6726 
6727         if (block_signals()) {
6728             return -QEMU_ERESTARTSYS;
6729         }
6730 
6731         fork_start();
6732         ret = fork();
6733         if (ret == 0) {
6734             /* Child Process.  */
6735             cpu_clone_regs_child(env, newsp, flags);
6736             fork_end(1);
6737             /* There is a race condition here.  The parent process could
6738                theoretically read the TID in the child process before the child
6739                tid is set.  This would require using either ptrace
6740                (not implemented) or having *_tidptr point at a shared memory
6741                mapping.  We can't repeat the spinlock hack used above because
6742                the child process gets its own copy of the lock.  */
6743             if (flags & CLONE_CHILD_SETTID)
6744                 put_user_u32(sys_gettid(), child_tidptr);
6745             if (flags & CLONE_PARENT_SETTID)
6746                 put_user_u32(sys_gettid(), parent_tidptr);
6747             ts = (TaskState *)cpu->opaque;
6748             if (flags & CLONE_SETTLS)
6749                 cpu_set_tls (env, newtls);
6750             if (flags & CLONE_CHILD_CLEARTID)
6751                 ts->child_tidptr = child_tidptr;
6752         } else {
6753             cpu_clone_regs_parent(env, flags);
6754             fork_end(0);
6755         }
6756         g_assert(!cpu_in_exclusive_context(cpu));
6757     }
6758     return ret;
6759 }
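/*
 * For reference (illustrative, not exhaustive): glibc's pthread_create()
 * issues clone() with roughly CLONE_VM | CLONE_FS | CLONE_FILES |
 * CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 * CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID and so takes the CLONE_VM
 * branch above, whereas a fork()-style clone (termination signal SIGCHLD,
 * no CLONE_VM) takes the host fork() path.
 */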
6760 
6761 /* Warning: doesn't handle Linux-specific flags... */
6762 static int target_to_host_fcntl_cmd(int cmd)
6763 {
6764     int ret;
6765 
6766     switch(cmd) {
6767     case TARGET_F_DUPFD:
6768     case TARGET_F_GETFD:
6769     case TARGET_F_SETFD:
6770     case TARGET_F_GETFL:
6771     case TARGET_F_SETFL:
6772     case TARGET_F_OFD_GETLK:
6773     case TARGET_F_OFD_SETLK:
6774     case TARGET_F_OFD_SETLKW:
6775         ret = cmd;
6776         break;
6777     case TARGET_F_GETLK:
6778         ret = F_GETLK64;
6779         break;
6780     case TARGET_F_SETLK:
6781         ret = F_SETLK64;
6782         break;
6783     case TARGET_F_SETLKW:
6784         ret = F_SETLKW64;
6785         break;
6786     case TARGET_F_GETOWN:
6787         ret = F_GETOWN;
6788         break;
6789     case TARGET_F_SETOWN:
6790         ret = F_SETOWN;
6791         break;
6792     case TARGET_F_GETSIG:
6793         ret = F_GETSIG;
6794         break;
6795     case TARGET_F_SETSIG:
6796         ret = F_SETSIG;
6797         break;
6798 #if TARGET_ABI_BITS == 32
6799     case TARGET_F_GETLK64:
6800         ret = F_GETLK64;
6801         break;
6802     case TARGET_F_SETLK64:
6803         ret = F_SETLK64;
6804         break;
6805     case TARGET_F_SETLKW64:
6806         ret = F_SETLKW64;
6807         break;
6808 #endif
6809     case TARGET_F_SETLEASE:
6810         ret = F_SETLEASE;
6811         break;
6812     case TARGET_F_GETLEASE:
6813         ret = F_GETLEASE;
6814         break;
6815 #ifdef F_DUPFD_CLOEXEC
6816     case TARGET_F_DUPFD_CLOEXEC:
6817         ret = F_DUPFD_CLOEXEC;
6818         break;
6819 #endif
6820     case TARGET_F_NOTIFY:
6821         ret = F_NOTIFY;
6822         break;
6823 #ifdef F_GETOWN_EX
6824     case TARGET_F_GETOWN_EX:
6825         ret = F_GETOWN_EX;
6826         break;
6827 #endif
6828 #ifdef F_SETOWN_EX
6829     case TARGET_F_SETOWN_EX:
6830         ret = F_SETOWN_EX;
6831         break;
6832 #endif
6833 #ifdef F_SETPIPE_SZ
6834     case TARGET_F_SETPIPE_SZ:
6835         ret = F_SETPIPE_SZ;
6836         break;
6837     case TARGET_F_GETPIPE_SZ:
6838         ret = F_GETPIPE_SZ;
6839         break;
6840 #endif
6841 #ifdef F_ADD_SEALS
6842     case TARGET_F_ADD_SEALS:
6843         ret = F_ADD_SEALS;
6844         break;
6845     case TARGET_F_GET_SEALS:
6846         ret = F_GET_SEALS;
6847         break;
6848 #endif
6849     default:
6850         ret = -TARGET_EINVAL;
6851         break;
6852     }
6853 
6854 #if defined(__powerpc64__)
6855     /* On PPC64, the glibc headers define F_*LK* as 12, 13 and 14, values
6856      * that are not supported by the kernel. The glibc fcntl() call actually
6857      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6858      * the syscall directly, adjust to what the kernel supports.
6859      */
6860     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6861         ret -= F_GETLK64 - 5;
6862     }
6863 #endif
6864 
6865     return ret;
6866 }
6867 
6868 #define FLOCK_TRANSTBL \
6869     switch (type) { \
6870     TRANSTBL_CONVERT(F_RDLCK); \
6871     TRANSTBL_CONVERT(F_WRLCK); \
6872     TRANSTBL_CONVERT(F_UNLCK); \
6873     }
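/*
 * TRANSTBL_CONVERT is redefined before each expansion of FLOCK_TRANSTBL, so
 * the same table produces both the target-to-host and the host-to-target
 * switch statements below.
 */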
6874 
6875 static int target_to_host_flock(int type)
6876 {
6877 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6878     FLOCK_TRANSTBL
6879 #undef  TRANSTBL_CONVERT
6880     return -TARGET_EINVAL;
6881 }
6882 
6883 static int host_to_target_flock(int type)
6884 {
6885 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6886     FLOCK_TRANSTBL
6887 #undef  TRANSTBL_CONVERT
6888     /* If we don't know how to convert the value coming
6889      * from the host, copy it to the target field as-is.
6890      */
6891     return type;
6892 }
6893 
6894 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6895                                             abi_ulong target_flock_addr)
6896 {
6897     struct target_flock *target_fl;
6898     int l_type;
6899 
6900     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     __get_user(l_type, &target_fl->l_type);
6905     l_type = target_to_host_flock(l_type);
6906     if (l_type < 0) {
6907         return l_type;
6908     }
6909     fl->l_type = l_type;
6910     __get_user(fl->l_whence, &target_fl->l_whence);
6911     __get_user(fl->l_start, &target_fl->l_start);
6912     __get_user(fl->l_len, &target_fl->l_len);
6913     __get_user(fl->l_pid, &target_fl->l_pid);
6914     unlock_user_struct(target_fl, target_flock_addr, 0);
6915     return 0;
6916 }
6917 
6918 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6919                                           const struct flock64 *fl)
6920 {
6921     struct target_flock *target_fl;
6922     short l_type;
6923 
6924     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6925         return -TARGET_EFAULT;
6926     }
6927 
6928     l_type = host_to_target_flock(fl->l_type);
6929     __put_user(l_type, &target_fl->l_type);
6930     __put_user(fl->l_whence, &target_fl->l_whence);
6931     __put_user(fl->l_start, &target_fl->l_start);
6932     __put_user(fl->l_len, &target_fl->l_len);
6933     __put_user(fl->l_pid, &target_fl->l_pid);
6934     unlock_user_struct(target_fl, target_flock_addr, 1);
6935     return 0;
6936 }
6937 
6938 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6939 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6940 
6941 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6942 struct target_oabi_flock64 {
6943     abi_short l_type;
6944     abi_short l_whence;
6945     abi_llong l_start;
6946     abi_llong l_len;
6947     abi_int   l_pid;
6948 } QEMU_PACKED;
6949 
6950 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6951                                                    abi_ulong target_flock_addr)
6952 {
6953     struct target_oabi_flock64 *target_fl;
6954     int l_type;
6955 
6956     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     __get_user(l_type, &target_fl->l_type);
6961     l_type = target_to_host_flock(l_type);
6962     if (l_type < 0) {
6963         return l_type;
6964     }
6965     fl->l_type = l_type;
6966     __get_user(fl->l_whence, &target_fl->l_whence);
6967     __get_user(fl->l_start, &target_fl->l_start);
6968     __get_user(fl->l_len, &target_fl->l_len);
6969     __get_user(fl->l_pid, &target_fl->l_pid);
6970     unlock_user_struct(target_fl, target_flock_addr, 0);
6971     return 0;
6972 }
6973 
6974 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6975                                                  const struct flock64 *fl)
6976 {
6977     struct target_oabi_flock64 *target_fl;
6978     short l_type;
6979 
6980     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6981         return -TARGET_EFAULT;
6982     }
6983 
6984     l_type = host_to_target_flock(fl->l_type);
6985     __put_user(l_type, &target_fl->l_type);
6986     __put_user(fl->l_whence, &target_fl->l_whence);
6987     __put_user(fl->l_start, &target_fl->l_start);
6988     __put_user(fl->l_len, &target_fl->l_len);
6989     __put_user(fl->l_pid, &target_fl->l_pid);
6990     unlock_user_struct(target_fl, target_flock_addr, 1);
6991     return 0;
6992 }
6993 #endif
6994 
6995 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6996                                               abi_ulong target_flock_addr)
6997 {
6998     struct target_flock64 *target_fl;
6999     int l_type;
7000 
7001     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     __get_user(l_type, &target_fl->l_type);
7006     l_type = target_to_host_flock(l_type);
7007     if (l_type < 0) {
7008         return l_type;
7009     }
7010     fl->l_type = l_type;
7011     __get_user(fl->l_whence, &target_fl->l_whence);
7012     __get_user(fl->l_start, &target_fl->l_start);
7013     __get_user(fl->l_len, &target_fl->l_len);
7014     __get_user(fl->l_pid, &target_fl->l_pid);
7015     unlock_user_struct(target_fl, target_flock_addr, 0);
7016     return 0;
7017 }
7018 
7019 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7020                                             const struct flock64 *fl)
7021 {
7022     struct target_flock64 *target_fl;
7023     short l_type;
7024 
7025     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7026         return -TARGET_EFAULT;
7027     }
7028 
7029     l_type = host_to_target_flock(fl->l_type);
7030     __put_user(l_type, &target_fl->l_type);
7031     __put_user(fl->l_whence, &target_fl->l_whence);
7032     __put_user(fl->l_start, &target_fl->l_start);
7033     __put_user(fl->l_len, &target_fl->l_len);
7034     __put_user(fl->l_pid, &target_fl->l_pid);
7035     unlock_user_struct(target_fl, target_flock_addr, 1);
7036     return 0;
7037 }
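/*
 * Note that all of the copy helpers above converge on the host's
 * struct flock64, so 64-bit file offsets survive the round trip whether the
 * target used its flock, flock64 or (on 32-bit ARM) OABI flock64 layout.
 */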
7038 
7039 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7040 {
7041     struct flock64 fl64;
7042 #ifdef F_GETOWN_EX
7043     struct f_owner_ex fox;
7044     struct target_f_owner_ex *target_fox;
7045 #endif
7046     abi_long ret;
7047     int host_cmd = target_to_host_fcntl_cmd(cmd);
7048 
7049     if (host_cmd == -TARGET_EINVAL)
7050         return host_cmd;
7051 
7052     switch(cmd) {
7053     case TARGET_F_GETLK:
7054         ret = copy_from_user_flock(&fl64, arg);
7055         if (ret) {
7056             return ret;
7057         }
7058         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7059         if (ret == 0) {
7060             ret = copy_to_user_flock(arg, &fl64);
7061         }
7062         break;
7063 
7064     case TARGET_F_SETLK:
7065     case TARGET_F_SETLKW:
7066         ret = copy_from_user_flock(&fl64, arg);
7067         if (ret) {
7068             return ret;
7069         }
7070         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7071         break;
7072 
7073     case TARGET_F_GETLK64:
7074     case TARGET_F_OFD_GETLK:
7075         ret = copy_from_user_flock64(&fl64, arg);
7076         if (ret) {
7077             return ret;
7078         }
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7080         if (ret == 0) {
7081             ret = copy_to_user_flock64(arg, &fl64);
7082         }
7083         break;
7084     case TARGET_F_SETLK64:
7085     case TARGET_F_SETLKW64:
7086     case TARGET_F_OFD_SETLK:
7087     case TARGET_F_OFD_SETLKW:
7088         ret = copy_from_user_flock64(&fl64, arg);
7089         if (ret) {
7090             return ret;
7091         }
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7093         break;
7094 
7095     case TARGET_F_GETFL:
7096         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7097         if (ret >= 0) {
7098             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7099         }
7100         break;
7101 
7102     case TARGET_F_SETFL:
7103         ret = get_errno(safe_fcntl(fd, host_cmd,
7104                                    target_to_host_bitmask(arg,
7105                                                           fcntl_flags_tbl)));
7106         break;
7107 
7108 #ifdef F_GETOWN_EX
7109     case TARGET_F_GETOWN_EX:
7110         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7111         if (ret >= 0) {
7112             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7113                 return -TARGET_EFAULT;
7114             target_fox->type = tswap32(fox.type);
7115             target_fox->pid = tswap32(fox.pid);
7116             unlock_user_struct(target_fox, arg, 1);
7117         }
7118         break;
7119 #endif
7120 
7121 #ifdef F_SETOWN_EX
7122     case TARGET_F_SETOWN_EX:
7123         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7124             return -TARGET_EFAULT;
7125         fox.type = tswap32(target_fox->type);
7126         fox.pid = tswap32(target_fox->pid);
7127         unlock_user_struct(target_fox, arg, 0);
7128         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7129         break;
7130 #endif
7131 
7132     case TARGET_F_SETSIG:
7133         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7134         break;
7135 
7136     case TARGET_F_GETSIG:
7137         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7138         break;
7139 
7140     case TARGET_F_SETOWN:
7141     case TARGET_F_GETOWN:
7142     case TARGET_F_SETLEASE:
7143     case TARGET_F_GETLEASE:
7144     case TARGET_F_SETPIPE_SZ:
7145     case TARGET_F_GETPIPE_SZ:
7146     case TARGET_F_ADD_SEALS:
7147     case TARGET_F_GET_SEALS:
7148         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7149         break;
7150 
7151     default:
7152         ret = get_errno(safe_fcntl(fd, cmd, arg));
7153         break;
7154     }
7155     return ret;
7156 }
7157 
7158 #ifdef USE_UID16
7159 
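/*
 * Legacy 16-bit ID handling: IDs that do not fit in 16 bits are reported as
 * 65534, the kernel's default overflowuid/overflowgid, while a 16-bit -1 is
 * widened back to -1 so the "leave this ID unchanged" convention of the
 * set*id() calls keeps working.
 */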
7160 static inline int high2lowuid(int uid)
7161 {
7162     if (uid > 65535)
7163         return 65534;
7164     else
7165         return uid;
7166 }
7167 
7168 static inline int high2lowgid(int gid)
7169 {
7170     if (gid > 65535)
7171         return 65534;
7172     else
7173         return gid;
7174 }
7175 
7176 static inline int low2highuid(int uid)
7177 {
7178     if ((int16_t)uid == -1)
7179         return -1;
7180     else
7181         return uid;
7182 }
7183 
7184 static inline int low2highgid(int gid)
7185 {
7186     if ((int16_t)gid == -1)
7187         return -1;
7188     else
7189         return gid;
7190 }
7191 static inline int tswapid(int id)
7192 {
7193     return tswap16(id);
7194 }
7195 
7196 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7197 
7198 #else /* !USE_UID16 */
7199 static inline int high2lowuid(int uid)
7200 {
7201     return uid;
7202 }
7203 static inline int high2lowgid(int gid)
7204 {
7205     return gid;
7206 }
7207 static inline int low2highuid(int uid)
7208 {
7209     return uid;
7210 }
7211 static inline int low2highgid(int gid)
7212 {
7213     return gid;
7214 }
7215 static inline int tswapid(int id)
7216 {
7217     return tswap32(id);
7218 }
7219 
7220 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7221 
7222 #endif /* USE_UID16 */
7223 
7224 /* We must do direct syscalls for setting UID/GID, because we want to
7225  * implement the Linux system call semantics of "change only for this thread",
7226  * not the libc/POSIX semantics of "change for all threads in process".
7227  * (See http://ewontfix.com/17/ for more details.)
7228  * We use the 32-bit version of the syscalls if present; if it is not
7229  * then either the host architecture supports 32-bit UIDs natively with
7230  * the standard syscall, or the 16-bit UID is the best we can do.
7231  */
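/*
 * Illustrative difference (hypothetical guest snippet): in a multi-threaded
 * guest, glibc's setuid(uid) broadcasts the credential change to every
 * thread through an internal signal, whereas a raw syscall(__NR_setuid, uid)
 * affects only the calling thread.  The latter is what a guest binary issuing
 * the syscall directly expects, hence the sys_set*id() wrappers below.
 */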
7232 #ifdef __NR_setuid32
7233 #define __NR_sys_setuid __NR_setuid32
7234 #else
7235 #define __NR_sys_setuid __NR_setuid
7236 #endif
7237 #ifdef __NR_setgid32
7238 #define __NR_sys_setgid __NR_setgid32
7239 #else
7240 #define __NR_sys_setgid __NR_setgid
7241 #endif
7242 #ifdef __NR_setresuid32
7243 #define __NR_sys_setresuid __NR_setresuid32
7244 #else
7245 #define __NR_sys_setresuid __NR_setresuid
7246 #endif
7247 #ifdef __NR_setresgid32
7248 #define __NR_sys_setresgid __NR_setresgid32
7249 #else
7250 #define __NR_sys_setresgid __NR_setresgid
7251 #endif
7252 
7253 _syscall1(int, sys_setuid, uid_t, uid)
7254 _syscall1(int, sys_setgid, gid_t, gid)
7255 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7256 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7257 
7258 void syscall_init(void)
7259 {
7260     IOCTLEntry *ie;
7261     const argtype *arg_type;
7262     int size;
7263 
7264     thunk_init(STRUCT_MAX);
7265 
7266 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7267 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7268 #include "syscall_types.h"
7269 #undef STRUCT
7270 #undef STRUCT_SPECIAL
7271 
7272     /* Patch the ioctl size if necessary; we rely on the fact that
7273        no real ioctl has all bits set to '1' in its size field. */
7274     ie = ioctl_entries;
7275     while (ie->target_cmd != 0) {
7276         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7277             TARGET_IOC_SIZEMASK) {
7278             arg_type = ie->arg_type;
7279             if (arg_type[0] != TYPE_PTR) {
7280                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7281                         ie->target_cmd);
7282                 exit(1);
7283             }
7284             arg_type++;
7285             size = thunk_type_size(arg_type, 0);
7286             ie->target_cmd = (ie->target_cmd &
7287                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7288                 (size << TARGET_IOC_SIZESHIFT);
7289         }
7290 
7291         /* automatic consistency check if same arch */
7292 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7293     (defined(__x86_64__) && defined(TARGET_X86_64))
7294         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7295             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7296                     ie->name, ie->target_cmd, ie->host_cmd);
7297         }
7298 #endif
7299         ie++;
7300     }
7301 }
7302 
7303 #ifdef TARGET_NR_truncate64
7304 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7305                                          abi_long arg2,
7306                                          abi_long arg3,
7307                                          abi_long arg4)
7308 {
7309     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7310         arg2 = arg3;
7311         arg3 = arg4;
7312     }
7313     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7314 }
7315 #endif
7316 
7317 #ifdef TARGET_NR_ftruncate64
7318 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7319                                           abi_long arg2,
7320                                           abi_long arg3,
7321                                           abi_long arg4)
7322 {
7323     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7324         arg2 = arg3;
7325         arg3 = arg4;
7326     }
7327     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7328 }
7329 #endif
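/*
 * In both wrappers above, regpairs_aligned() detects 32-bit ABIs that pass
 * 64-bit syscall arguments in aligned register pairs; the padding register
 * that the alignment introduces is skipped by shifting arg2/arg3 down.
 */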
7330 
7331 #if defined(TARGET_NR_timer_settime) || \
7332     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7333 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7334                                                  abi_ulong target_addr)
7335 {
7336     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7337                                 offsetof(struct target_itimerspec,
7338                                          it_interval)) ||
7339         target_to_host_timespec(&host_its->it_value, target_addr +
7340                                 offsetof(struct target_itimerspec,
7341                                          it_value))) {
7342         return -TARGET_EFAULT;
7343     }
7344 
7345     return 0;
7346 }
7347 #endif
7348 
7349 #if defined(TARGET_NR_timer_settime64) || \
7350     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7351 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7352                                                    abi_ulong target_addr)
7353 {
7354     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7355                                   offsetof(struct target__kernel_itimerspec,
7356                                            it_interval)) ||
7357         target_to_host_timespec64(&host_its->it_value, target_addr +
7358                                   offsetof(struct target__kernel_itimerspec,
7359                                            it_value))) {
7360         return -TARGET_EFAULT;
7361     }
7362 
7363     return 0;
7364 }
7365 #endif
7366 
7367 #if ((defined(TARGET_NR_timerfd_gettime) || \
7368       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7369       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7370 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7371                                                  struct itimerspec *host_its)
7372 {
7373     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7374                                                        it_interval),
7375                                 &host_its->it_interval) ||
7376         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7377                                                        it_value),
7378                                 &host_its->it_value)) {
7379         return -TARGET_EFAULT;
7380     }
7381     return 0;
7382 }
7383 #endif
7384 
7385 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7386       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7387       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7388 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7389                                                    struct itimerspec *host_its)
7390 {
7391     if (host_to_target_timespec64(target_addr +
7392                                   offsetof(struct target__kernel_itimerspec,
7393                                            it_interval),
7394                                   &host_its->it_interval) ||
7395         host_to_target_timespec64(target_addr +
7396                                   offsetof(struct target__kernel_itimerspec,
7397                                            it_value),
7398                                   &host_its->it_value)) {
7399         return -TARGET_EFAULT;
7400     }
7401     return 0;
7402 }
7403 #endif
7404 
7405 #if defined(TARGET_NR_adjtimex) || \
7406     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7407 static inline abi_long target_to_host_timex(struct timex *host_tx,
7408                                             abi_long target_addr)
7409 {
7410     struct target_timex *target_tx;
7411 
7412     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7413         return -TARGET_EFAULT;
7414     }
7415 
7416     __get_user(host_tx->modes, &target_tx->modes);
7417     __get_user(host_tx->offset, &target_tx->offset);
7418     __get_user(host_tx->freq, &target_tx->freq);
7419     __get_user(host_tx->maxerror, &target_tx->maxerror);
7420     __get_user(host_tx->esterror, &target_tx->esterror);
7421     __get_user(host_tx->status, &target_tx->status);
7422     __get_user(host_tx->constant, &target_tx->constant);
7423     __get_user(host_tx->precision, &target_tx->precision);
7424     __get_user(host_tx->tolerance, &target_tx->tolerance);
7425     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7426     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7427     __get_user(host_tx->tick, &target_tx->tick);
7428     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7429     __get_user(host_tx->jitter, &target_tx->jitter);
7430     __get_user(host_tx->shift, &target_tx->shift);
7431     __get_user(host_tx->stabil, &target_tx->stabil);
7432     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7433     __get_user(host_tx->calcnt, &target_tx->calcnt);
7434     __get_user(host_tx->errcnt, &target_tx->errcnt);
7435     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7436     __get_user(host_tx->tai, &target_tx->tai);
7437 
7438     unlock_user_struct(target_tx, target_addr, 0);
7439     return 0;
7440 }
7441 
7442 static inline abi_long host_to_target_timex(abi_long target_addr,
7443                                             struct timex *host_tx)
7444 {
7445     struct target_timex *target_tx;
7446 
7447     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7448         return -TARGET_EFAULT;
7449     }
7450 
7451     __put_user(host_tx->modes, &target_tx->modes);
7452     __put_user(host_tx->offset, &target_tx->offset);
7453     __put_user(host_tx->freq, &target_tx->freq);
7454     __put_user(host_tx->maxerror, &target_tx->maxerror);
7455     __put_user(host_tx->esterror, &target_tx->esterror);
7456     __put_user(host_tx->status, &target_tx->status);
7457     __put_user(host_tx->constant, &target_tx->constant);
7458     __put_user(host_tx->precision, &target_tx->precision);
7459     __put_user(host_tx->tolerance, &target_tx->tolerance);
7460     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7461     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7462     __put_user(host_tx->tick, &target_tx->tick);
7463     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7464     __put_user(host_tx->jitter, &target_tx->jitter);
7465     __put_user(host_tx->shift, &target_tx->shift);
7466     __put_user(host_tx->stabil, &target_tx->stabil);
7467     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7468     __put_user(host_tx->calcnt, &target_tx->calcnt);
7469     __put_user(host_tx->errcnt, &target_tx->errcnt);
7470     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7471     __put_user(host_tx->tai, &target_tx->tai);
7472 
7473     unlock_user_struct(target_tx, target_addr, 1);
7474     return 0;
7475 }
7476 #endif
7477 
7478 
7479 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7480 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7481                                               abi_long target_addr)
7482 {
7483     struct target__kernel_timex *target_tx;
7484 
7485     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7486                                  offsetof(struct target__kernel_timex,
7487                                           time))) {
7488         return -TARGET_EFAULT;
7489     }
7490 
7491     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7492         return -TARGET_EFAULT;
7493     }
7494 
7495     __get_user(host_tx->modes, &target_tx->modes);
7496     __get_user(host_tx->offset, &target_tx->offset);
7497     __get_user(host_tx->freq, &target_tx->freq);
7498     __get_user(host_tx->maxerror, &target_tx->maxerror);
7499     __get_user(host_tx->esterror, &target_tx->esterror);
7500     __get_user(host_tx->status, &target_tx->status);
7501     __get_user(host_tx->constant, &target_tx->constant);
7502     __get_user(host_tx->precision, &target_tx->precision);
7503     __get_user(host_tx->tolerance, &target_tx->tolerance);
7504     __get_user(host_tx->tick, &target_tx->tick);
7505     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7506     __get_user(host_tx->jitter, &target_tx->jitter);
7507     __get_user(host_tx->shift, &target_tx->shift);
7508     __get_user(host_tx->stabil, &target_tx->stabil);
7509     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7510     __get_user(host_tx->calcnt, &target_tx->calcnt);
7511     __get_user(host_tx->errcnt, &target_tx->errcnt);
7512     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7513     __get_user(host_tx->tai, &target_tx->tai);
7514 
7515     unlock_user_struct(target_tx, target_addr, 0);
7516     return 0;
7517 }
7518 
7519 static inline abi_long host_to_target_timex64(abi_long target_addr,
7520                                               struct timex *host_tx)
7521 {
7522     struct target__kernel_timex *target_tx;
7523 
7524     if (copy_to_user_timeval64(target_addr +
7525                                offsetof(struct target__kernel_timex, time),
7526                                &host_tx->time)) {
7527         return -TARGET_EFAULT;
7528     }
7529 
7530     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7531         return -TARGET_EFAULT;
7532     }
7533 
7534     __put_user(host_tx->modes, &target_tx->modes);
7535     __put_user(host_tx->offset, &target_tx->offset);
7536     __put_user(host_tx->freq, &target_tx->freq);
7537     __put_user(host_tx->maxerror, &target_tx->maxerror);
7538     __put_user(host_tx->esterror, &target_tx->esterror);
7539     __put_user(host_tx->status, &target_tx->status);
7540     __put_user(host_tx->constant, &target_tx->constant);
7541     __put_user(host_tx->precision, &target_tx->precision);
7542     __put_user(host_tx->tolerance, &target_tx->tolerance);
7543     __put_user(host_tx->tick, &target_tx->tick);
7544     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7545     __put_user(host_tx->jitter, &target_tx->jitter);
7546     __put_user(host_tx->shift, &target_tx->shift);
7547     __put_user(host_tx->stabil, &target_tx->stabil);
7548     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7549     __put_user(host_tx->calcnt, &target_tx->calcnt);
7550     __put_user(host_tx->errcnt, &target_tx->errcnt);
7551     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7552     __put_user(host_tx->tai, &target_tx->tai);
7553 
7554     unlock_user_struct(target_tx, target_addr, 1);
7555     return 0;
7556 }
7557 #endif
7558 
7559 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7560 #define sigev_notify_thread_id _sigev_un._tid
7561 #endif
7562 
7563 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7564                                                abi_ulong target_addr)
7565 {
7566     struct target_sigevent *target_sevp;
7567 
7568     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7569         return -TARGET_EFAULT;
7570     }
7571 
7572     /* This union is awkward on 64 bit systems because it has a 32 bit
7573      * integer and a pointer in it; we follow the conversion approach
7574      * used for handling sigval types in signal.c so the guest should get
7575      * the correct value back even if we did a 64 bit byteswap and it's
7576      * using the 32 bit integer.
7577      */
7578     host_sevp->sigev_value.sival_ptr =
7579         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7580     host_sevp->sigev_signo =
7581         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7582     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7583     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7584 
7585     unlock_user_struct(target_sevp, target_addr, 1);
7586     return 0;
7587 }
7588 
7589 #if defined(TARGET_NR_mlockall)
7590 static inline int target_to_host_mlockall_arg(int arg)
7591 {
7592     int result = 0;
7593 
7594     if (arg & TARGET_MCL_CURRENT) {
7595         result |= MCL_CURRENT;
7596     }
7597     if (arg & TARGET_MCL_FUTURE) {
7598         result |= MCL_FUTURE;
7599     }
7600 #ifdef MCL_ONFAULT
7601     if (arg & TARGET_MCL_ONFAULT) {
7602         result |= MCL_ONFAULT;
7603     }
7604 #endif
7605 
7606     return result;
7607 }
7608 #endif
7609 
7610 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7611      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7612      defined(TARGET_NR_newfstatat))
7613 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7614                                              abi_ulong target_addr,
7615                                              struct stat *host_st)
7616 {
7617 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7618     if (cpu_env->eabi) {
7619         struct target_eabi_stat64 *target_st;
7620 
7621         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7622             return -TARGET_EFAULT;
7623         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7624         __put_user(host_st->st_dev, &target_st->st_dev);
7625         __put_user(host_st->st_ino, &target_st->st_ino);
7626 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7627         __put_user(host_st->st_ino, &target_st->__st_ino);
7628 #endif
7629         __put_user(host_st->st_mode, &target_st->st_mode);
7630         __put_user(host_st->st_nlink, &target_st->st_nlink);
7631         __put_user(host_st->st_uid, &target_st->st_uid);
7632         __put_user(host_st->st_gid, &target_st->st_gid);
7633         __put_user(host_st->st_rdev, &target_st->st_rdev);
7634         __put_user(host_st->st_size, &target_st->st_size);
7635         __put_user(host_st->st_blksize, &target_st->st_blksize);
7636         __put_user(host_st->st_blocks, &target_st->st_blocks);
7637         __put_user(host_st->st_atime, &target_st->target_st_atime);
7638         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7639         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7640 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7641         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7642         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7643         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7644 #endif
7645         unlock_user_struct(target_st, target_addr, 1);
7646     } else
7647 #endif
7648     {
7649 #if defined(TARGET_HAS_STRUCT_STAT64)
7650         struct target_stat64 *target_st;
7651 #else
7652         struct target_stat *target_st;
7653 #endif
7654 
7655         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7656             return -TARGET_EFAULT;
7657         memset(target_st, 0, sizeof(*target_st));
7658         __put_user(host_st->st_dev, &target_st->st_dev);
7659         __put_user(host_st->st_ino, &target_st->st_ino);
7660 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7661         __put_user(host_st->st_ino, &target_st->__st_ino);
7662 #endif
7663         __put_user(host_st->st_mode, &target_st->st_mode);
7664         __put_user(host_st->st_nlink, &target_st->st_nlink);
7665         __put_user(host_st->st_uid, &target_st->st_uid);
7666         __put_user(host_st->st_gid, &target_st->st_gid);
7667         __put_user(host_st->st_rdev, &target_st->st_rdev);
7668         /* XXX: better use of kernel struct */
7669         __put_user(host_st->st_size, &target_st->st_size);
7670         __put_user(host_st->st_blksize, &target_st->st_blksize);
7671         __put_user(host_st->st_blocks, &target_st->st_blocks);
7672         __put_user(host_st->st_atime, &target_st->target_st_atime);
7673         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7674         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7675 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7676         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7677         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7678         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7679 #endif
7680         unlock_user_struct(target_st, target_addr, 1);
7681     }
7682 
7683     return 0;
7684 }
7685 #endif
7686 
7687 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7688 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7689                                             abi_ulong target_addr)
7690 {
7691     struct target_statx *target_stx;
7692 
7693     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7694         return -TARGET_EFAULT;
7695     }
7696     memset(target_stx, 0, sizeof(*target_stx));
7697 
7698     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7699     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7700     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7701     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7702     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7703     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7704     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7705     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7706     __put_user(host_stx->stx_size, &target_stx->stx_size);
7707     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7708     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7709     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7710     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7711     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7712     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7713     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7714     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7715     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7716     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7717     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7718     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7719     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7720     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7721 
7722     unlock_user_struct(target_stx, target_addr, 1);
7723 
7724     return 0;
7725 }
7726 #endif
7727 
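/*
 * Both do_sys_futex() and do_safe_futex() below pick the host futex
 * syscall the same way: a 64-bit host uses __NR_futex directly, since
 * its timespec already carries a 64-bit time_t, while a 32-bit host
 * prefers __NR_futex_time64 when available and falls back to the
 * legacy __NR_futex otherwise.
 */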
7728 static int do_sys_futex(int *uaddr, int op, int val,
7729                          const struct timespec *timeout, int *uaddr2,
7730                          int val3)
7731 {
7732 #if HOST_LONG_BITS == 64
7733 #if defined(__NR_futex)
7734     /* a 64-bit host always has a 64-bit time_t; no _time64 variant is defined */
7735     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7736 
7737 #endif
7738 #else /* HOST_LONG_BITS == 64 */
7739 #if defined(__NR_futex_time64)
7740     if (sizeof(timeout->tv_sec) == 8) {
7741         /* _time64 function on 32bit arch */
7742         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7743     }
7744 #endif
7745 #if defined(__NR_futex)
7746     /* old function on 32bit arch */
7747     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7748 #endif
7749 #endif /* HOST_LONG_BITS == 64 */
7750     g_assert_not_reached();
7751 }
7752 
7753 static int do_safe_futex(int *uaddr, int op, int val,
7754                          const struct timespec *timeout, int *uaddr2,
7755                          int val3)
7756 {
7757 #if HOST_LONG_BITS == 64
7758 #if defined(__NR_futex)
7759     /* a 64-bit host always has a 64-bit time_t; no _time64 variant is defined */
7760     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7761 #endif
7762 #else /* HOST_LONG_BITS == 64 */
7763 #if defined(__NR_futex_time64)
7764     if (sizeof(timeout->tv_sec) == 8) {
7765         /* _time64 function on 32bit arch */
7766         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7767                                            val3));
7768     }
7769 #endif
7770 #if defined(__NR_futex)
7771     /* old function on 32bit arch */
7772     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7773 #endif
7774 #endif /* HOST_LONG_BITS == 64 */
7775     return -TARGET_ENOSYS;
7776 }
7777 
7778 /* ??? Using host futex calls even when target atomic operations
7779    are not really atomic probably breaks things.  However, implementing
7780    futexes locally would make it tricky to share futexes between multiple
7781    processes, and it is probably pointless anyway because guest atomic
7782    operations would not work either.  */
7783 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7784 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7785                     int op, int val, target_ulong timeout,
7786                     target_ulong uaddr2, int val3)
7787 {
7788     struct timespec ts, *pts = NULL;
7789     void *haddr2 = NULL;
7790     int base_op;
7791 
7792     /* We assume FUTEX_* constants are the same on both host and target. */
7793 #ifdef FUTEX_CMD_MASK
7794     base_op = op & FUTEX_CMD_MASK;
7795 #else
7796     base_op = op;
7797 #endif
7798     switch (base_op) {
7799     case FUTEX_WAIT:
7800     case FUTEX_WAIT_BITSET:
7801         val = tswap32(val);
7802         break;
7803     case FUTEX_WAIT_REQUEUE_PI:
7804         val = tswap32(val);
7805         haddr2 = g2h(cpu, uaddr2);
7806         break;
7807     case FUTEX_LOCK_PI:
7808     case FUTEX_LOCK_PI2:
7809         break;
7810     case FUTEX_WAKE:
7811     case FUTEX_WAKE_BITSET:
7812     case FUTEX_TRYLOCK_PI:
7813     case FUTEX_UNLOCK_PI:
7814         timeout = 0;
7815         break;
7816     case FUTEX_FD:
7817         val = target_to_host_signal(val);
7818         timeout = 0;
7819         break;
7820     case FUTEX_CMP_REQUEUE:
7821     case FUTEX_CMP_REQUEUE_PI:
7822         val3 = tswap32(val3);
7823         /* fall through */
7824     case FUTEX_REQUEUE:
7825     case FUTEX_WAKE_OP:
7826         /*
7827          * For these, the 4th argument is not TIMEOUT, but VAL2.
7828          * But the prototype of do_safe_futex takes a pointer, so
7829          * insert casts to satisfy the compiler.  We do not need
7830          * to tswap VAL2 since it's not compared to guest memory.
7831          */
7832         pts = (struct timespec *)(uintptr_t)timeout;
7833         timeout = 0;
7834         haddr2 = g2h(cpu, uaddr2);
7835         break;
7836     default:
7837         return -TARGET_ENOSYS;
7838     }
7839     if (timeout) {
7840         pts = &ts;
7841         if (time64
7842             ? target_to_host_timespec64(pts, timeout)
7843             : target_to_host_timespec(pts, timeout)) {
7844             return -TARGET_EFAULT;
7845         }
7846     }
7847     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7848 }
7849 #endif
7850 
7851 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7852 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7853                                      abi_long handle, abi_long mount_id,
7854                                      abi_long flags)
7855 {
7856     struct file_handle *target_fh;
7857     struct file_handle *fh;
7858     int mid = 0;
7859     abi_long ret;
7860     char *name;
7861     unsigned int size, total_size;
7862 
7863     if (get_user_s32(size, handle)) {
7864         return -TARGET_EFAULT;
7865     }
7866 
7867     name = lock_user_string(pathname);
7868     if (!name) {
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     total_size = sizeof(struct file_handle) + size;
7873     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7874     if (!target_fh) {
7875         unlock_user(name, pathname, 0);
7876         return -TARGET_EFAULT;
7877     }
7878 
7879     fh = g_malloc0(total_size);
7880     fh->handle_bytes = size;
7881 
7882     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7883     unlock_user(name, pathname, 0);
7884 
7885     /* man name_to_handle_at(2):
7886      * Other than the use of the handle_bytes field, the caller should treat
7887      * the file_handle structure as an opaque data type
7888      */
7889 
7890     memcpy(target_fh, fh, total_size);
7891     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7892     target_fh->handle_type = tswap32(fh->handle_type);
7893     g_free(fh);
7894     unlock_user(target_fh, handle, total_size);
7895 
7896     if (put_user_s32(mid, mount_id)) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     return ret;
7901 
7902 }
7903 #endif
7904 
7905 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7906 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7907                                      abi_long flags)
7908 {
7909     struct file_handle *target_fh;
7910     struct file_handle *fh;
7911     unsigned int size, total_size;
7912     abi_long ret;
7913 
7914     if (get_user_s32(size, handle)) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     total_size = sizeof(struct file_handle) + size;
7919     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7920     if (!target_fh) {
7921         return -TARGET_EFAULT;
7922     }
7923 
7924     fh = g_memdup(target_fh, total_size);
7925     fh->handle_bytes = size;
7926     fh->handle_type = tswap32(target_fh->handle_type);
7927 
7928     ret = get_errno(open_by_handle_at(mount_fd, fh,
7929                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7930 
7931     g_free(fh);
7932 
7933     unlock_user(target_fh, handle, total_size);
7934 
7935     return ret;
7936 }
7937 #endif
7938 
7939 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7940 
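/*
 * signalfd4 emulation: only the O_NONBLOCK/O_CLOEXEC-style flags are
 * accepted, the guest signal mask is converted to a host sigset_t, and
 * an fd translator is registered so data later read from the signalfd
 * can be converted back for the guest.
 */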
7941 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7942 {
7943     int host_flags;
7944     target_sigset_t *target_mask;
7945     sigset_t host_mask;
7946     abi_long ret;
7947 
7948     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7949         return -TARGET_EINVAL;
7950     }
7951     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7952         return -TARGET_EFAULT;
7953     }
7954 
7955     target_to_host_sigset(&host_mask, target_mask);
7956 
7957     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7958 
7959     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7960     if (ret >= 0) {
7961         fd_trans_register(ret, &target_signalfd_trans);
7962     }
7963 
7964     unlock_user_struct(target_mask, mask, 0);
7965 
7966     return ret;
7967 }
7968 #endif
7969 
7970 /* Map host to target signal numbers for the wait family of syscalls.
7971    Assume all other status bits are the same.  */
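/* For reference, the Linux wait status layout places a terminating
   signal in the low 7 bits and a stop signal in bits 8-15 (with 0x7f
   in the low byte), which is why only those two signal fields need to
   be remapped to target signal numbers here.  */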
7972 int host_to_target_waitstatus(int status)
7973 {
7974     if (WIFSIGNALED(status)) {
7975         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7976     }
7977     if (WIFSTOPPED(status)) {
7978         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7979                | (status & 0xff);
7980     }
7981     return status;
7982 }
7983 
7984 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7985 {
7986     CPUState *cpu = env_cpu(cpu_env);
7987     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7988     int i;
7989 
7990     for (i = 0; i < bprm->argc; i++) {
7991         size_t len = strlen(bprm->argv[i]) + 1;
7992 
7993         if (write(fd, bprm->argv[i], len) != len) {
7994             return -1;
7995         }
7996     }
7997 
7998     return 0;
7999 }
8000 
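/*
 * Synthesize /proc/self/maps for the guest: walk the host mappings,
 * keep only ranges that correspond to valid guest addresses, translate
 * them with h2g(), and print them with the guest page protections plus
 * a "[stack]" annotation for the guest stack region.
 */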
8001 static int open_self_maps(CPUArchState *cpu_env, int fd)
8002 {
8003     CPUState *cpu = env_cpu(cpu_env);
8004     TaskState *ts = cpu->opaque;
8005     GSList *map_info = read_self_maps();
8006     GSList *s;
8007     int count;
8008 
8009     for (s = map_info; s; s = g_slist_next(s)) {
8010         MapInfo *e = (MapInfo *) s->data;
8011 
8012         if (h2g_valid(e->start)) {
8013             unsigned long min = e->start;
8014             unsigned long max = e->end;
8015             int flags = page_get_flags(h2g(min));
8016             const char *path;
8017 
8018             max = h2g_valid(max - 1) ?
8019                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8020 
8021             if (page_check_range(h2g(min), max - min, flags) == -1) {
8022                 continue;
8023             }
8024 
8025 #ifdef TARGET_HPPA
8026             if (h2g(max) == ts->info->stack_limit) {
8027 #else
8028             if (h2g(min) == ts->info->stack_limit) {
8029 #endif
8030                 path = "[stack]";
8031             } else {
8032                 path = e->path;
8033             }
8034 
8035             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8036                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8037                             h2g(min), h2g(max - 1) + 1,
8038                             (flags & PAGE_READ) ? 'r' : '-',
8039                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8040                             (flags & PAGE_EXEC) ? 'x' : '-',
8041                             e->is_priv ? 'p' : 's',
8042                             (uint64_t) e->offset, e->dev, e->inode);
8043             if (path) {
8044                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8045             } else {
8046                 dprintf(fd, "\n");
8047             }
8048         }
8049     }
8050 
8051     free_self_maps(map_info);
8052 
8053 #ifdef TARGET_VSYSCALL_PAGE
8054     /*
8055      * We only support execution from the vsyscall page.
8056      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8057      */
8058     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8059                     " --xp 00000000 00:00 0",
8060                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8061     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8062 #endif
8063 
8064     return 0;
8065 }
8066 
8067 static int open_self_stat(CPUArchState *cpu_env, int fd)
8068 {
8069     CPUState *cpu = env_cpu(cpu_env);
8070     TaskState *ts = cpu->opaque;
8071     g_autoptr(GString) buf = g_string_new(NULL);
8072     int i;
8073 
8074     for (i = 0; i < 44; i++) {
8075         if (i == 0) {
8076             /* pid */
8077             g_string_printf(buf, FMT_pid " ", getpid());
8078         } else if (i == 1) {
8079             /* app name */
8080             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8081             bin = bin ? bin + 1 : ts->bprm->argv[0];
8082             g_string_printf(buf, "(%.15s) ", bin);
8083         } else if (i == 2) {
8084             /* task state */
8085             g_string_assign(buf, "R "); /* we are running right now */
8086         } else if (i == 3) {
8087             /* ppid */
8088             g_string_printf(buf, FMT_pid " ", getppid());
8089         } else if (i == 21) {
8090             /* starttime */
8091             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8092         } else if (i == 27) {
8093             /* stack bottom */
8094             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8095         } else {
8096             /* the remaining fields are not emulated and are reported as 0 */
8097             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8098         }
8099 
8100         if (write(fd, buf->str, buf->len) != buf->len) {
8101             return -1;
8102         }
8103     }
8104 
8105     return 0;
8106 }
8107 
8108 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8109 {
8110     CPUState *cpu = env_cpu(cpu_env);
8111     TaskState *ts = cpu->opaque;
8112     abi_ulong auxv = ts->info->saved_auxv;
8113     abi_ulong len = ts->info->auxv_len;
8114     char *ptr;
8115 
8116     /*
8117      * The auxiliary vector is stored on the target process stack.
8118      * Read the whole auxv vector and copy it to the file.
8119      */
8120     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8121     if (ptr != NULL) {
8122         while (len > 0) {
8123             ssize_t r;
8124             r = write(fd, ptr, len);
8125             if (r <= 0) {
8126                 break;
8127             }
8128             len -= r;
8129             ptr += r;
8130         }
8131         lseek(fd, 0, SEEK_SET);
8132         unlock_user(ptr, auxv, len);
8133     }
8134 
8135     return 0;
8136 }
8137 
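/*
 * Return non-zero if filename names the given entry in this process's
 * own /proc directory, i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>"
 * where <pid> is our own pid.
 */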
8138 static int is_proc_myself(const char *filename, const char *entry)
8139 {
8140     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8141         filename += strlen("/proc/");
8142         if (!strncmp(filename, "self/", strlen("self/"))) {
8143             filename += strlen("self/");
8144         } else if (*filename >= '1' && *filename <= '9') {
8145             char myself[80];
8146             snprintf(myself, sizeof(myself), "%d/", getpid());
8147             if (!strncmp(filename, myself, strlen(myself))) {
8148                 filename += strlen(myself);
8149             } else {
8150                 return 0;
8151             }
8152         } else {
8153             return 0;
8154         }
8155         if (!strcmp(filename, entry)) {
8156             return 1;
8157         }
8158     }
8159     return 0;
8160 }
8161 
8162 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8163                       const char *fmt, int code)
8164 {
8165     if (logfile) {
8166         CPUState *cs = env_cpu(env);
8167 
8168         fprintf(logfile, fmt, code);
8169         fprintf(logfile, "Failing executable: %s\n", exec_path);
8170         cpu_dump_state(cs, logfile, 0);
8171         open_self_maps(env, fileno(logfile));
8172     }
8173 }
8174 
8175 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8176 {
8177     /* dump to console */
8178     excp_dump_file(stderr, env, fmt, code);
8179 
8180     /* dump to log file */
8181     if (qemu_log_separate()) {
8182         FILE *logfile = qemu_log_trylock();
8183 
8184         excp_dump_file(logfile, env, fmt, code);
8185         qemu_log_unlock(logfile);
8186     }
8187 }
8188 
8189 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8190     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8191 static int is_proc(const char *filename, const char *entry)
8192 {
8193     return strcmp(filename, entry) == 0;
8194 }
8195 #endif
8196 
8197 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8198 static int open_net_route(CPUArchState *cpu_env, int fd)
8199 {
8200     FILE *fp;
8201     char *line = NULL;
8202     size_t len = 0;
8203     ssize_t read;
8204 
8205     fp = fopen("/proc/net/route", "r");
8206     if (fp == NULL) {
8207         return -1;
8208     }
8209 
8210     /* read header */
8211 
8212     read = getline(&line, &len, fp);
8213     dprintf(fd, "%s", line);
8214 
8215     /* read routes */
8216 
8217     while ((read = getline(&line, &len, fp)) != -1) {
8218         char iface[16];
8219         uint32_t dest, gw, mask;
8220         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8221         int fields;
8222 
8223         fields = sscanf(line,
8224                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8225                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8226                         &mask, &mtu, &window, &irtt);
8227         if (fields != 11) {
8228             continue;
8229         }
8230         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8231                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8232                 metric, tswap32(mask), mtu, window, irtt);
8233     }
8234 
8235     free(line);
8236     fclose(fp);
8237 
8238     return 0;
8239 }
8240 #endif
8241 
8242 #if defined(TARGET_SPARC)
8243 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8244 {
8245     dprintf(fd, "type\t\t: sun4u\n");
8246     return 0;
8247 }
8248 #endif
8249 
8250 #if defined(TARGET_HPPA)
8251 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8252 {
8253     int i, num_cpus;
8254 
8255     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8256     for (i = 0; i < num_cpus; i++) {
8257         dprintf(fd, "processor\t: %d\n", i);
8258         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8259         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8260         dprintf(fd, "capabilities\t: os32\n");
8261         dprintf(fd, "model\t\t: 9000/778/B160L - "
8262                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8263     }
8264     return 0;
8265 }
8266 #endif
8267 
8268 #if defined(TARGET_M68K)
8269 static int open_hardware(CPUArchState *cpu_env, int fd)
8270 {
8271     dprintf(fd, "Model:\t\tqemu-m68k\n");
8272     return 0;
8273 }
8274 #endif
8275 
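/*
 * openat emulation.  A table of "faked" files is checked first: for
 * paths such as the guest's own /proc/self/maps, the contents are
 * generated by a fill() callback into a memfd (or an unlinked temporary
 * file when memfd_create is unavailable) and that descriptor is
 * returned instead of opening the host file.
 */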
8276 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8277 {
8278     struct fake_open {
8279         const char *filename;
8280         int (*fill)(CPUArchState *cpu_env, int fd);
8281         int (*cmp)(const char *s1, const char *s2);
8282     };
8283     const struct fake_open *fake_open;
8284     static const struct fake_open fakes[] = {
8285         { "maps", open_self_maps, is_proc_myself },
8286         { "stat", open_self_stat, is_proc_myself },
8287         { "auxv", open_self_auxv, is_proc_myself },
8288         { "cmdline", open_self_cmdline, is_proc_myself },
8289 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8290         { "/proc/net/route", open_net_route, is_proc },
8291 #endif
8292 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8293         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8294 #endif
8295 #if defined(TARGET_M68K)
8296         { "/proc/hardware", open_hardware, is_proc },
8297 #endif
8298         { NULL, NULL, NULL }
8299     };
8300 
8301     if (is_proc_myself(pathname, "exe")) {
8302         return safe_openat(dirfd, exec_path, flags, mode);
8303     }
8304 
8305     for (fake_open = fakes; fake_open->filename; fake_open++) {
8306         if (fake_open->cmp(pathname, fake_open->filename)) {
8307             break;
8308         }
8309     }
8310 
8311     if (fake_open->filename) {
8312         const char *tmpdir;
8313         char filename[PATH_MAX];
8314         int fd, r;
8315 
8316         fd = memfd_create("qemu-open", 0);
8317         if (fd < 0) {
8318             if (errno != ENOSYS) {
8319                 return fd;
8320             }
8321             /* no memfd available: create a temporary file to hold the contents */
8322             tmpdir = getenv("TMPDIR");
8323             if (!tmpdir)
8324                 tmpdir = "/tmp";
8325             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8326             fd = mkstemp(filename);
8327             if (fd < 0) {
8328                 return fd;
8329             }
8330             unlink(filename);
8331         }
8332 
8333         if ((r = fake_open->fill(cpu_env, fd))) {
8334             int e = errno;
8335             close(fd);
8336             errno = e;
8337             return r;
8338         }
8339         lseek(fd, 0, SEEK_SET);
8340 
8341         return fd;
8342     }
8343 
8344     return safe_openat(dirfd, path(pathname), flags, mode);
8345 }
8346 
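/*
 * execveat emulation: count and lock the guest argv/envp string
 * pointers, build host argv/envp arrays, and hand them to
 * safe_execveat(); a path naming the guest's own /proc/.../exe is
 * redirected to the real executable path.
 */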
8347 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8348                        abi_long pathname, abi_long guest_argp,
8349                        abi_long guest_envp, int flags)
8350 {
8351     int ret;
8352     char **argp, **envp;
8353     int argc, envc;
8354     abi_ulong gp;
8355     abi_ulong addr;
8356     char **q;
8357     void *p;
8358 
8359     argc = 0;
8360 
8361     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8362         if (get_user_ual(addr, gp)) {
8363             return -TARGET_EFAULT;
8364         }
8365         if (!addr) {
8366             break;
8367         }
8368         argc++;
8369     }
8370     envc = 0;
8371     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8372         if (get_user_ual(addr, gp)) {
8373             return -TARGET_EFAULT;
8374         }
8375         if (!addr) {
8376             break;
8377         }
8378         envc++;
8379     }
8380 
8381     argp = g_new0(char *, argc + 1);
8382     envp = g_new0(char *, envc + 1);
8383 
8384     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8385         if (get_user_ual(addr, gp)) {
8386             goto execve_efault;
8387         }
8388         if (!addr) {
8389             break;
8390         }
8391         *q = lock_user_string(addr);
8392         if (!*q) {
8393             goto execve_efault;
8394         }
8395     }
8396     *q = NULL;
8397 
8398     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8399         if (get_user_ual(addr, gp)) {
8400             goto execve_efault;
8401         }
8402         if (!addr) {
8403             break;
8404         }
8405         *q = lock_user_string(addr);
8406         if (!*q) {
8407             goto execve_efault;
8408         }
8409     }
8410     *q = NULL;
8411 
8412     /*
8413      * Although execve() is not an interruptible syscall it is
8414      * a special case where we must use the safe_syscall wrapper:
8415      * if we allow a signal to happen before we make the host
8416      * syscall then we will 'lose' it, because at the point of
8417      * execve the process leaves QEMU's control. So we use the
8418      * safe syscall wrapper to ensure that we either take the
8419      * signal as a guest signal, or else it does not happen
8420      * before the execve completes and makes it the other
8421      * program's problem.
8422      */
8423     p = lock_user_string(pathname);
8424     if (!p) {
8425         goto execve_efault;
8426     }
8427 
8428     if (is_proc_myself(p, "exe")) {
8429         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8430     } else {
8431         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8432     }
8433 
8434     unlock_user(p, pathname, 0);
8435 
8436     goto execve_end;
8437 
8438 execve_efault:
8439     ret = -TARGET_EFAULT;
8440 
8441 execve_end:
8442     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8443         if (get_user_ual(addr, gp) || !addr) {
8444             break;
8445         }
8446         unlock_user(*q, addr, 0);
8447     }
8448     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8449         if (get_user_ual(addr, gp) || !addr) {
8450             break;
8451         }
8452         unlock_user(*q, addr, 0);
8453     }
8454 
8455     g_free(argp);
8456     g_free(envp);
8457     return ret;
8458 }
8459 
8460 #define TIMER_MAGIC 0x0caf0000
8461 #define TIMER_MAGIC_MASK 0xffff0000
8462 
8463 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
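/*
 * For example, a guest-visible timer ID of 0x0caf0002 decodes to
 * internal index 2; any value without the TIMER_MAGIC pattern in its
 * upper 16 bits is rejected with -TARGET_EINVAL.
 */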
8464 static target_timer_t get_timer_id(abi_long arg)
8465 {
8466     target_timer_t timerid = arg;
8467 
8468     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8469         return -TARGET_EINVAL;
8470     }
8471 
8472     timerid &= 0xffff;
8473 
8474     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8475         return -TARGET_EINVAL;
8476     }
8477 
8478     return timerid;
8479 }
8480 
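/*
 * Convert a CPU affinity mask between the guest representation (an
 * array of abi_ulong words) and the host representation (an array of
 * unsigned long words).  The conversion is done bit by bit so that
 * differing word sizes and byte orders are handled correctly.
 */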
8481 static int target_to_host_cpu_mask(unsigned long *host_mask,
8482                                    size_t host_size,
8483                                    abi_ulong target_addr,
8484                                    size_t target_size)
8485 {
8486     unsigned target_bits = sizeof(abi_ulong) * 8;
8487     unsigned host_bits = sizeof(*host_mask) * 8;
8488     abi_ulong *target_mask;
8489     unsigned i, j;
8490 
8491     assert(host_size >= target_size);
8492 
8493     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8494     if (!target_mask) {
8495         return -TARGET_EFAULT;
8496     }
8497     memset(host_mask, 0, host_size);
8498 
8499     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8500         unsigned bit = i * target_bits;
8501         abi_ulong val;
8502 
8503         __get_user(val, &target_mask[i]);
8504         for (j = 0; j < target_bits; j++, bit++) {
8505             if (val & (1UL << j)) {
8506                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8507             }
8508         }
8509     }
8510 
8511     unlock_user(target_mask, target_addr, 0);
8512     return 0;
8513 }
8514 
8515 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8516                                    size_t host_size,
8517                                    abi_ulong target_addr,
8518                                    size_t target_size)
8519 {
8520     unsigned target_bits = sizeof(abi_ulong) * 8;
8521     unsigned host_bits = sizeof(*host_mask) * 8;
8522     abi_ulong *target_mask;
8523     unsigned i, j;
8524 
8525     assert(host_size >= target_size);
8526 
8527     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8528     if (!target_mask) {
8529         return -TARGET_EFAULT;
8530     }
8531 
8532     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8533         unsigned bit = i * target_bits;
8534         abi_ulong val = 0;
8535 
8536         for (j = 0; j < target_bits; j++, bit++) {
8537             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8538                 val |= 1UL << j;
8539             }
8540         }
8541         __put_user(val, &target_mask[i]);
8542     }
8543 
8544     unlock_user(target_mask, target_addr, target_size);
8545     return 0;
8546 }
8547 
8548 #ifdef TARGET_NR_getdents
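/*
 * getdents emulation: read host directory entries into a scratch
 * buffer, then repack each record into the target's dirent layout,
 * byte-swapping the fields and recomputing the record length and
 * alignment.  If the repacked records no longer fit in the guest
 * buffer, the directory offset is rewound to the first entry that was
 * not returned.
 */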
8549 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8550 {
8551     g_autofree void *hdirp = NULL;
8552     void *tdirp;
8553     int hlen, hoff, toff;
8554     int hreclen, treclen;
8555     off64_t prev_diroff = 0;
8556 
8557     hdirp = g_try_malloc(count);
8558     if (!hdirp) {
8559         return -TARGET_ENOMEM;
8560     }
8561 
8562 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8563     hlen = sys_getdents(dirfd, hdirp, count);
8564 #else
8565     hlen = sys_getdents64(dirfd, hdirp, count);
8566 #endif
8567 
8568     hlen = get_errno(hlen);
8569     if (is_error(hlen)) {
8570         return hlen;
8571     }
8572 
8573     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8574     if (!tdirp) {
8575         return -TARGET_EFAULT;
8576     }
8577 
8578     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8579 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8580         struct linux_dirent *hde = hdirp + hoff;
8581 #else
8582         struct linux_dirent64 *hde = hdirp + hoff;
8583 #endif
8584         struct target_dirent *tde = tdirp + toff;
8585         int namelen;
8586         uint8_t type;
8587 
8588         namelen = strlen(hde->d_name);
8589         hreclen = hde->d_reclen;
8590         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8591         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8592 
8593         if (toff + treclen > count) {
8594             /*
8595              * If the host struct is smaller than the target struct, or
8596              * requires less alignment and thus packs into less space,
8597              * then the host can return more entries than we can pass
8598              * on to the guest.
8599              */
8600             if (toff == 0) {
8601                 toff = -TARGET_EINVAL; /* result buffer is too small */
8602                 break;
8603             }
8604             /*
8605              * Return what we have, resetting the file pointer to the
8606              * location of the first record not returned.
8607              */
8608             lseek64(dirfd, prev_diroff, SEEK_SET);
8609             break;
8610         }
8611 
8612         prev_diroff = hde->d_off;
8613         tde->d_ino = tswapal(hde->d_ino);
8614         tde->d_off = tswapal(hde->d_off);
8615         tde->d_reclen = tswap16(treclen);
8616         memcpy(tde->d_name, hde->d_name, namelen + 1);
8617 
8618         /*
8619          * The getdents type is in what was formerly a padding byte at the
8620          * end of the structure.
8621          */
8622 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8623         type = *((uint8_t *)hde + hreclen - 1);
8624 #else
8625         type = hde->d_type;
8626 #endif
8627         *((uint8_t *)tde + treclen - 1) = type;
8628     }
8629 
8630     unlock_user(tdirp, arg2, toff);
8631     return toff;
8632 }
8633 #endif /* TARGET_NR_getdents */
8634 
8635 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8636 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8637 {
8638     g_autofree void *hdirp = NULL;
8639     void *tdirp;
8640     int hlen, hoff, toff;
8641     int hreclen, treclen;
8642     off64_t prev_diroff = 0;
8643 
8644     hdirp = g_try_malloc(count);
8645     if (!hdirp) {
8646         return -TARGET_ENOMEM;
8647     }
8648 
8649     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8650     if (is_error(hlen)) {
8651         return hlen;
8652     }
8653 
8654     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8655     if (!tdirp) {
8656         return -TARGET_EFAULT;
8657     }
8658 
8659     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8660         struct linux_dirent64 *hde = hdirp + hoff;
8661         struct target_dirent64 *tde = tdirp + toff;
8662         int namelen;
8663 
8664         namelen = strlen(hde->d_name) + 1;
8665         hreclen = hde->d_reclen;
8666         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8667         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8668 
8669         if (toff + treclen > count) {
8670             /*
8671              * If the host struct is smaller than the target struct, or
8672              * requires less alignment and thus packs into less space,
8673              * then the host can return more entries than we can pass
8674              * on to the guest.
8675              */
8676             if (toff == 0) {
8677                 toff = -TARGET_EINVAL; /* result buffer is too small */
8678                 break;
8679             }
8680             /*
8681              * Return what we have, resetting the file pointer to the
8682              * location of the first record not returned.
8683              */
8684             lseek64(dirfd, prev_diroff, SEEK_SET);
8685             break;
8686         }
8687 
8688         prev_diroff = hde->d_off;
8689         tde->d_ino = tswap64(hde->d_ino);
8690         tde->d_off = tswap64(hde->d_off);
8691         tde->d_reclen = tswap16(treclen);
8692         tde->d_type = hde->d_type;
8693         memcpy(tde->d_name, hde->d_name, namelen);
8694     }
8695 
8696     unlock_user(tdirp, arg2, toff);
8697     return toff;
8698 }
8699 #endif /* TARGET_NR_getdents64 */
8700 
8701 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8702 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8703 #endif
8704 
8705 /* This is an internal helper for do_syscall so that it is easier
8706  * to have a single return point, allowing actions such as logging
8707  * of syscall results to be performed there.
8708  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8709  */
8710 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8711                             abi_long arg2, abi_long arg3, abi_long arg4,
8712                             abi_long arg5, abi_long arg6, abi_long arg7,
8713                             abi_long arg8)
8714 {
8715     CPUState *cpu = env_cpu(cpu_env);
8716     abi_long ret;
8717 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8718     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8719     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8720     || defined(TARGET_NR_statx)
8721     struct stat st;
8722 #endif
8723 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8724     || defined(TARGET_NR_fstatfs)
8725     struct statfs stfs;
8726 #endif
8727     void *p;
8728 
8729     switch(num) {
8730     case TARGET_NR_exit:
8731         /* In old applications this may be used to implement _exit(2).
8732            However, in threaded applications it is used for thread termination,
8733            and _exit_group is used for application termination.
8734            Do thread termination if we have more than one thread.  */
8735 
8736         if (block_signals()) {
8737             return -QEMU_ERESTARTSYS;
8738         }
8739 
8740         pthread_mutex_lock(&clone_lock);
8741 
8742         if (CPU_NEXT(first_cpu)) {
8743             TaskState *ts = cpu->opaque;
8744 
8745             if (ts->child_tidptr) {
8746                 put_user_u32(0, ts->child_tidptr);
8747                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8748                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8749             }
8750 
8751             object_unparent(OBJECT(cpu));
8752             object_unref(OBJECT(cpu));
8753             /*
8754              * At this point the CPU should be unrealized and removed
8755              * from cpu lists. We can clean-up the rest of the thread
8756              * data without the lock held.
8757              */
8758 
8759             pthread_mutex_unlock(&clone_lock);
8760 
8761             thread_cpu = NULL;
8762             g_free(ts);
8763             rcu_unregister_thread();
8764             pthread_exit(NULL);
8765         }
8766 
8767         pthread_mutex_unlock(&clone_lock);
8768         preexit_cleanup(cpu_env, arg1);
8769         _exit(arg1);
8770         return 0; /* avoid warning */
8771     case TARGET_NR_read:
8772         if (arg2 == 0 && arg3 == 0) {
8773             return get_errno(safe_read(arg1, 0, 0));
8774         } else {
8775             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8776                 return -TARGET_EFAULT;
8777             ret = get_errno(safe_read(arg1, p, arg3));
8778             if (ret >= 0 &&
8779                 fd_trans_host_to_target_data(arg1)) {
8780                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8781             }
8782             unlock_user(p, arg2, ret);
8783         }
8784         return ret;
8785     case TARGET_NR_write:
8786         if (arg2 == 0 && arg3 == 0) {
8787             return get_errno(safe_write(arg1, 0, 0));
8788         }
8789         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8790             return -TARGET_EFAULT;
8791         if (fd_trans_target_to_host_data(arg1)) {
8792             void *copy = g_malloc(arg3);
8793             memcpy(copy, p, arg3);
8794             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8795             if (ret >= 0) {
8796                 ret = get_errno(safe_write(arg1, copy, ret));
8797             }
8798             g_free(copy);
8799         } else {
8800             ret = get_errno(safe_write(arg1, p, arg3));
8801         }
8802         unlock_user(p, arg2, 0);
8803         return ret;
8804 
8805 #ifdef TARGET_NR_open
8806     case TARGET_NR_open:
8807         if (!(p = lock_user_string(arg1)))
8808             return -TARGET_EFAULT;
8809         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8810                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8811                                   arg3));
8812         fd_trans_unregister(ret);
8813         unlock_user(p, arg1, 0);
8814         return ret;
8815 #endif
8816     case TARGET_NR_openat:
8817         if (!(p = lock_user_string(arg2)))
8818             return -TARGET_EFAULT;
8819         ret = get_errno(do_openat(cpu_env, arg1, p,
8820                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8821                                   arg4));
8822         fd_trans_unregister(ret);
8823         unlock_user(p, arg2, 0);
8824         return ret;
8825 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8826     case TARGET_NR_name_to_handle_at:
8827         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8828         return ret;
8829 #endif
8830 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8831     case TARGET_NR_open_by_handle_at:
8832         ret = do_open_by_handle_at(arg1, arg2, arg3);
8833         fd_trans_unregister(ret);
8834         return ret;
8835 #endif
8836 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8837     case TARGET_NR_pidfd_open:
8838         return get_errno(pidfd_open(arg1, arg2));
8839 #endif
8840 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8841     case TARGET_NR_pidfd_send_signal:
8842         {
8843             siginfo_t uinfo, *puinfo;
8844 
8845             if (arg3) {
8846                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8847                 if (!p) {
8848                     return -TARGET_EFAULT;
8849                  }
8850                  target_to_host_siginfo(&uinfo, p);
8851                  unlock_user(p, arg3, 0);
8852                  puinfo = &uinfo;
8853             } else {
8854                  puinfo = NULL;
8855             }
8856             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8857                                               puinfo, arg4));
8858         }
8859         return ret;
8860 #endif
8861 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8862     case TARGET_NR_pidfd_getfd:
8863         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8864 #endif
8865     case TARGET_NR_close:
8866         fd_trans_unregister(arg1);
8867         return get_errno(close(arg1));
8868 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8869     case TARGET_NR_close_range:
8870         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8871         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8872             abi_long fd, maxfd;
8873             maxfd = MIN(arg2, target_fd_max);
8874             for (fd = arg1; fd < maxfd; fd++) {
8875                 fd_trans_unregister(fd);
8876             }
8877         }
8878         return ret;
8879 #endif
8880 
8881     case TARGET_NR_brk:
8882         return do_brk(arg1);
8883 #ifdef TARGET_NR_fork
8884     case TARGET_NR_fork:
8885         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8886 #endif
8887 #ifdef TARGET_NR_waitpid
8888     case TARGET_NR_waitpid:
8889         {
8890             int status;
8891             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8892             if (!is_error(ret) && arg2 && ret
8893                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8894                 return -TARGET_EFAULT;
8895         }
8896         return ret;
8897 #endif
8898 #ifdef TARGET_NR_waitid
8899     case TARGET_NR_waitid:
8900         {
8901             siginfo_t info;
8902             info.si_pid = 0;
8903             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8904             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8905                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8906                     return -TARGET_EFAULT;
8907                 host_to_target_siginfo(p, &info);
8908                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8909             }
8910         }
8911         return ret;
8912 #endif
8913 #ifdef TARGET_NR_creat /* not on alpha */
8914     case TARGET_NR_creat:
8915         if (!(p = lock_user_string(arg1)))
8916             return -TARGET_EFAULT;
8917         ret = get_errno(creat(p, arg2));
8918         fd_trans_unregister(ret);
8919         unlock_user(p, arg1, 0);
8920         return ret;
8921 #endif
8922 #ifdef TARGET_NR_link
8923     case TARGET_NR_link:
8924         {
8925             void * p2;
8926             p = lock_user_string(arg1);
8927             p2 = lock_user_string(arg2);
8928             if (!p || !p2)
8929                 ret = -TARGET_EFAULT;
8930             else
8931                 ret = get_errno(link(p, p2));
8932             unlock_user(p2, arg2, 0);
8933             unlock_user(p, arg1, 0);
8934         }
8935         return ret;
8936 #endif
8937 #if defined(TARGET_NR_linkat)
8938     case TARGET_NR_linkat:
8939         {
8940             void * p2 = NULL;
8941             if (!arg2 || !arg4)
8942                 return -TARGET_EFAULT;
8943             p  = lock_user_string(arg2);
8944             p2 = lock_user_string(arg4);
8945             if (!p || !p2)
8946                 ret = -TARGET_EFAULT;
8947             else
8948                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8949             unlock_user(p, arg2, 0);
8950             unlock_user(p2, arg4, 0);
8951         }
8952         return ret;
8953 #endif
8954 #ifdef TARGET_NR_unlink
8955     case TARGET_NR_unlink:
8956         if (!(p = lock_user_string(arg1)))
8957             return -TARGET_EFAULT;
8958         ret = get_errno(unlink(p));
8959         unlock_user(p, arg1, 0);
8960         return ret;
8961 #endif
8962 #if defined(TARGET_NR_unlinkat)
8963     case TARGET_NR_unlinkat:
8964         if (!(p = lock_user_string(arg2)))
8965             return -TARGET_EFAULT;
8966         ret = get_errno(unlinkat(arg1, p, arg3));
8967         unlock_user(p, arg2, 0);
8968         return ret;
8969 #endif
8970     case TARGET_NR_execveat:
8971         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8972     case TARGET_NR_execve:
8973         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8974     case TARGET_NR_chdir:
8975         if (!(p = lock_user_string(arg1)))
8976             return -TARGET_EFAULT;
8977         ret = get_errno(chdir(p));
8978         unlock_user(p, arg1, 0);
8979         return ret;
8980 #ifdef TARGET_NR_time
8981     case TARGET_NR_time:
8982         {
8983             time_t host_time;
8984             ret = get_errno(time(&host_time));
8985             if (!is_error(ret)
8986                 && arg1
8987                 && put_user_sal(host_time, arg1))
8988                 return -TARGET_EFAULT;
8989         }
8990         return ret;
8991 #endif
8992 #ifdef TARGET_NR_mknod
8993     case TARGET_NR_mknod:
8994         if (!(p = lock_user_string(arg1)))
8995             return -TARGET_EFAULT;
8996         ret = get_errno(mknod(p, arg2, arg3));
8997         unlock_user(p, arg1, 0);
8998         return ret;
8999 #endif
9000 #if defined(TARGET_NR_mknodat)
9001     case TARGET_NR_mknodat:
9002         if (!(p = lock_user_string(arg2)))
9003             return -TARGET_EFAULT;
9004         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9005         unlock_user(p, arg2, 0);
9006         return ret;
9007 #endif
9008 #ifdef TARGET_NR_chmod
9009     case TARGET_NR_chmod:
9010         if (!(p = lock_user_string(arg1)))
9011             return -TARGET_EFAULT;
9012         ret = get_errno(chmod(p, arg2));
9013         unlock_user(p, arg1, 0);
9014         return ret;
9015 #endif
9016 #ifdef TARGET_NR_lseek
9017     case TARGET_NR_lseek:
9018         return get_errno(lseek(arg1, arg2, arg3));
9019 #endif
9020 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9021     /* Alpha specific */
9022     case TARGET_NR_getxpid:
9023         cpu_env->ir[IR_A4] = getppid();
9024         return get_errno(getpid());
9025 #endif
9026 #ifdef TARGET_NR_getpid
9027     case TARGET_NR_getpid:
9028         return get_errno(getpid());
9029 #endif
9030     case TARGET_NR_mount:
9031         {
9032             /* need to look at the data field */
9033             void *p2, *p3;
9034 
9035             if (arg1) {
9036                 p = lock_user_string(arg1);
9037                 if (!p) {
9038                     return -TARGET_EFAULT;
9039                 }
9040             } else {
9041                 p = NULL;
9042             }
9043 
9044             p2 = lock_user_string(arg2);
9045             if (!p2) {
9046                 if (arg1) {
9047                     unlock_user(p, arg1, 0);
9048                 }
9049                 return -TARGET_EFAULT;
9050             }
9051 
9052             if (arg3) {
9053                 p3 = lock_user_string(arg3);
9054                 if (!p3) {
9055                     if (arg1) {
9056                         unlock_user(p, arg1, 0);
9057                     }
9058                     unlock_user(p2, arg2, 0);
9059                     return -TARGET_EFAULT;
9060                 }
9061             } else {
9062                 p3 = NULL;
9063             }
9064 
9065             /* FIXME - arg5 should be locked, but it isn't clear how to
9066              * do that since it's not guaranteed to be a NULL-terminated
9067              * string.
9068              */
9069             if (!arg5) {
9070                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9071             } else {
9072                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9073             }
9074             ret = get_errno(ret);
9075 
9076             if (arg1) {
9077                 unlock_user(p, arg1, 0);
9078             }
9079             unlock_user(p2, arg2, 0);
9080             if (arg3) {
9081                 unlock_user(p3, arg3, 0);
9082             }
9083         }
9084         return ret;
9085 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9086 #if defined(TARGET_NR_umount)
9087     case TARGET_NR_umount:
9088 #endif
9089 #if defined(TARGET_NR_oldumount)
9090     case TARGET_NR_oldumount:
9091 #endif
9092         if (!(p = lock_user_string(arg1)))
9093             return -TARGET_EFAULT;
9094         ret = get_errno(umount(p));
9095         unlock_user(p, arg1, 0);
9096         return ret;
9097 #endif
9098 #ifdef TARGET_NR_stime /* not on alpha */
9099     case TARGET_NR_stime:
9100         {
9101             struct timespec ts;
9102             ts.tv_nsec = 0;
9103             if (get_user_sal(ts.tv_sec, arg1)) {
9104                 return -TARGET_EFAULT;
9105             }
9106             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9107         }
9108 #endif
9109 #ifdef TARGET_NR_alarm /* not on alpha */
9110     case TARGET_NR_alarm:
9111         return alarm(arg1);
9112 #endif
9113 #ifdef TARGET_NR_pause /* not on alpha */
9114     case TARGET_NR_pause:
9115         if (!block_signals()) {
9116             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9117         }
9118         return -TARGET_EINTR;
9119 #endif
9120 #ifdef TARGET_NR_utime
9121     case TARGET_NR_utime:
9122         {
9123             struct utimbuf tbuf, *host_tbuf;
9124             struct target_utimbuf *target_tbuf;
9125             if (arg2) {
9126                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9127                     return -TARGET_EFAULT;
9128                 tbuf.actime = tswapal(target_tbuf->actime);
9129                 tbuf.modtime = tswapal(target_tbuf->modtime);
9130                 unlock_user_struct(target_tbuf, arg2, 0);
9131                 host_tbuf = &tbuf;
9132             } else {
9133                 host_tbuf = NULL;
9134             }
9135             if (!(p = lock_user_string(arg1)))
9136                 return -TARGET_EFAULT;
9137             ret = get_errno(utime(p, host_tbuf));
9138             unlock_user(p, arg1, 0);
9139         }
9140         return ret;
9141 #endif
9142 #ifdef TARGET_NR_utimes
9143     case TARGET_NR_utimes:
9144         {
9145             struct timeval *tvp, tv[2];
9146             if (arg2) {
9147                 if (copy_from_user_timeval(&tv[0], arg2)
9148                     || copy_from_user_timeval(&tv[1],
9149                                               arg2 + sizeof(struct target_timeval)))
9150                     return -TARGET_EFAULT;
9151                 tvp = tv;
9152             } else {
9153                 tvp = NULL;
9154             }
9155             if (!(p = lock_user_string(arg1)))
9156                 return -TARGET_EFAULT;
9157             ret = get_errno(utimes(p, tvp));
9158             unlock_user(p, arg1, 0);
9159         }
9160         return ret;
9161 #endif
9162 #if defined(TARGET_NR_futimesat)
9163     case TARGET_NR_futimesat:
9164         {
9165             struct timeval *tvp, tv[2];
9166             if (arg3) {
9167                 if (copy_from_user_timeval(&tv[0], arg3)
9168                     || copy_from_user_timeval(&tv[1],
9169                                               arg3 + sizeof(struct target_timeval)))
9170                     return -TARGET_EFAULT;
9171                 tvp = tv;
9172             } else {
9173                 tvp = NULL;
9174             }
9175             if (!(p = lock_user_string(arg2))) {
9176                 return -TARGET_EFAULT;
9177             }
9178             ret = get_errno(futimesat(arg1, path(p), tvp));
9179             unlock_user(p, arg2, 0);
9180         }
9181         return ret;
9182 #endif
9183 #ifdef TARGET_NR_access
9184     case TARGET_NR_access:
9185         if (!(p = lock_user_string(arg1))) {
9186             return -TARGET_EFAULT;
9187         }
9188         ret = get_errno(access(path(p), arg2));
9189         unlock_user(p, arg1, 0);
9190         return ret;
9191 #endif
9192 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9193     case TARGET_NR_faccessat:
9194         if (!(p = lock_user_string(arg2))) {
9195             return -TARGET_EFAULT;
9196         }
9197         ret = get_errno(faccessat(arg1, p, arg3, 0));
9198         unlock_user(p, arg2, 0);
9199         return ret;
9200 #endif
9201 #if defined(TARGET_NR_faccessat2)
9202     case TARGET_NR_faccessat2:
9203         if (!(p = lock_user_string(arg2))) {
9204             return -TARGET_EFAULT;
9205         }
9206         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9207         unlock_user(p, arg2, 0);
9208         return ret;
9209 #endif
9210 #ifdef TARGET_NR_nice /* not on alpha */
9211     case TARGET_NR_nice:
9212         return get_errno(nice(arg1));
9213 #endif
9214     case TARGET_NR_sync:
9215         sync();
9216         return 0;
9217 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9218     case TARGET_NR_syncfs:
9219         return get_errno(syncfs(arg1));
9220 #endif
9221     case TARGET_NR_kill:
9222         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9223 #ifdef TARGET_NR_rename
9224     case TARGET_NR_rename:
9225         {
9226             void *p2;
9227             p = lock_user_string(arg1);
9228             p2 = lock_user_string(arg2);
9229             if (!p || !p2)
9230                 ret = -TARGET_EFAULT;
9231             else
9232                 ret = get_errno(rename(p, p2));
9233             unlock_user(p2, arg2, 0);
9234             unlock_user(p, arg1, 0);
9235         }
9236         return ret;
9237 #endif
9238 #if defined(TARGET_NR_renameat)
9239     case TARGET_NR_renameat:
9240         {
9241             void *p2;
9242             p  = lock_user_string(arg2);
9243             p2 = lock_user_string(arg4);
9244             if (!p || !p2)
9245                 ret = -TARGET_EFAULT;
9246             else
9247                 ret = get_errno(renameat(arg1, p, arg3, p2));
9248             unlock_user(p2, arg4, 0);
9249             unlock_user(p, arg2, 0);
9250         }
9251         return ret;
9252 #endif
9253 #if defined(TARGET_NR_renameat2)
9254     case TARGET_NR_renameat2:
9255         {
9256             void *p2;
9257             p  = lock_user_string(arg2);
9258             p2 = lock_user_string(arg4);
9259             if (!p || !p2) {
9260                 ret = -TARGET_EFAULT;
9261             } else {
9262                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9263             }
9264             unlock_user(p2, arg4, 0);
9265             unlock_user(p, arg2, 0);
9266         }
9267         return ret;
9268 #endif
9269 #ifdef TARGET_NR_mkdir
9270     case TARGET_NR_mkdir:
9271         if (!(p = lock_user_string(arg1)))
9272             return -TARGET_EFAULT;
9273         ret = get_errno(mkdir(p, arg2));
9274         unlock_user(p, arg1, 0);
9275         return ret;
9276 #endif
9277 #if defined(TARGET_NR_mkdirat)
9278     case TARGET_NR_mkdirat:
9279         if (!(p = lock_user_string(arg2)))
9280             return -TARGET_EFAULT;
9281         ret = get_errno(mkdirat(arg1, p, arg3));
9282         unlock_user(p, arg2, 0);
9283         return ret;
9284 #endif
9285 #ifdef TARGET_NR_rmdir
9286     case TARGET_NR_rmdir:
9287         if (!(p = lock_user_string(arg1)))
9288             return -TARGET_EFAULT;
9289         ret = get_errno(rmdir(p));
9290         unlock_user(p, arg1, 0);
9291         return ret;
9292 #endif
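    /*
     * For dup/dup2/dup3 below, fd_trans_dup() copies any per-fd translation
     * state (registered for e.g. signalfd or netlink sockets) to the new
     * descriptor, so data read or written through the duplicate keeps being
     * converted between target and host formats.
     */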
9293     case TARGET_NR_dup:
9294         ret = get_errno(dup(arg1));
9295         if (ret >= 0) {
9296             fd_trans_dup(arg1, ret);
9297         }
9298         return ret;
9299 #ifdef TARGET_NR_pipe
9300     case TARGET_NR_pipe:
9301         return do_pipe(cpu_env, arg1, 0, 0);
9302 #endif
9303 #ifdef TARGET_NR_pipe2
9304     case TARGET_NR_pipe2:
9305         return do_pipe(cpu_env, arg1,
9306                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9307 #endif
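    /*
     * The pipe2 flags go through fcntl_flags_tbl because the numeric values
     * of bits such as O_NONBLOCK differ between architectures (0x800 on x86,
     * 0x80 on MIPS, 0x4000 on SPARC), so they must be translated into the
     * host's encoding before the host pipe2() sees them.
     */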
9308     case TARGET_NR_times:
9309         {
9310             struct target_tms *tmsp;
9311             struct tms tms;
9312             ret = get_errno(times(&tms));
9313             if (arg1) {
9314                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9315                 if (!tmsp)
9316                     return -TARGET_EFAULT;
9317                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9318                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9319                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9320                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9321             }
9322             if (!is_error(ret))
9323                 ret = host_to_target_clock_t(ret);
9324         }
9325         return ret;
9326     case TARGET_NR_acct:
9327         if (arg1 == 0) {
9328             ret = get_errno(acct(NULL));
9329         } else {
9330             if (!(p = lock_user_string(arg1))) {
9331                 return -TARGET_EFAULT;
9332             }
9333             ret = get_errno(acct(path(p)));
9334             unlock_user(p, arg1, 0);
9335         }
9336         return ret;
9337 #ifdef TARGET_NR_umount2
9338     case TARGET_NR_umount2:
9339         if (!(p = lock_user_string(arg1)))
9340             return -TARGET_EFAULT;
9341         ret = get_errno(umount2(p, arg2));
9342         unlock_user(p, arg1, 0);
9343         return ret;
9344 #endif
9345     case TARGET_NR_ioctl:
9346         return do_ioctl(arg1, arg2, arg3);
9347 #ifdef TARGET_NR_fcntl
9348     case TARGET_NR_fcntl:
9349         return do_fcntl(arg1, arg2, arg3);
9350 #endif
9351     case TARGET_NR_setpgid:
9352         return get_errno(setpgid(arg1, arg2));
9353     case TARGET_NR_umask:
9354         return get_errno(umask(arg1));
9355     case TARGET_NR_chroot:
9356         if (!(p = lock_user_string(arg1)))
9357             return -TARGET_EFAULT;
9358         ret = get_errno(chroot(p));
9359         unlock_user(p, arg1, 0);
9360         return ret;
9361 #ifdef TARGET_NR_dup2
9362     case TARGET_NR_dup2:
9363         ret = get_errno(dup2(arg1, arg2));
9364         if (ret >= 0) {
9365             fd_trans_dup(arg1, arg2);
9366         }
9367         return ret;
9368 #endif
9369 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9370     case TARGET_NR_dup3:
9371     {
9372         int host_flags;
9373 
9374         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9375             return -TARGET_EINVAL;
9376         }
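        /*
         * O_CLOEXEC is the only flag the kernel accepts for dup3; anything
         * else is rejected with EINVAL, as above.
         */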
9377         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9378         ret = get_errno(dup3(arg1, arg2, host_flags));
9379         if (ret >= 0) {
9380             fd_trans_dup(arg1, arg2);
9381         }
9382         return ret;
9383     }
9384 #endif
9385 #ifdef TARGET_NR_getppid /* not on alpha */
9386     case TARGET_NR_getppid:
9387         return get_errno(getppid());
9388 #endif
9389 #ifdef TARGET_NR_getpgrp
9390     case TARGET_NR_getpgrp:
9391         return get_errno(getpgrp());
9392 #endif
9393     case TARGET_NR_setsid:
9394         return get_errno(setsid());
9395 #ifdef TARGET_NR_sigaction
9396     case TARGET_NR_sigaction:
9397         {
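            /*
             * This is the old (non-rt) sigaction call, which only carries a
             * single-word signal mask.  Most targets use struct
             * target_old_sigaction for it; MIPS reuses target_sigaction but
             * still treats only sa_mask.sig[0] as significant, which is why
             * the remaining mask words are cleared when copying back.
             */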
9398 #if defined(TARGET_MIPS)
9399             struct target_sigaction act, oact, *pact, *old_act;
9400 
9401             if (arg2) {
9402                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9403                     return -TARGET_EFAULT;
9404                 act._sa_handler = old_act->_sa_handler;
9405                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9406                 act.sa_flags = old_act->sa_flags;
9407                 unlock_user_struct(old_act, arg2, 0);
9408                 pact = &act;
9409             } else {
9410                 pact = NULL;
9411             }
9412 
9413             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9414 
9415             if (!is_error(ret) && arg3) {
9416                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9417                     return -TARGET_EFAULT;
9418                 old_act->_sa_handler = oact._sa_handler;
9419                 old_act->sa_flags = oact.sa_flags;
9420                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9421                 old_act->sa_mask.sig[1] = 0;
9422                 old_act->sa_mask.sig[2] = 0;
9423                 old_act->sa_mask.sig[3] = 0;
9424                 unlock_user_struct(old_act, arg3, 1);
9425             }
9426 #else
9427             struct target_old_sigaction *old_act;
9428             struct target_sigaction act, oact, *pact;
9429             if (arg2) {
9430                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9431                     return -TARGET_EFAULT;
9432                 act._sa_handler = old_act->_sa_handler;
9433                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9434                 act.sa_flags = old_act->sa_flags;
9435 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9436                 act.sa_restorer = old_act->sa_restorer;
9437 #endif
9438                 unlock_user_struct(old_act, arg2, 0);
9439                 pact = &act;
9440             } else {
9441                 pact = NULL;
9442             }
9443             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9444             if (!is_error(ret) && arg3) {
9445                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9446                     return -TARGET_EFAULT;
9447                 old_act->_sa_handler = oact._sa_handler;
9448                 old_act->sa_mask = oact.sa_mask.sig[0];
9449                 old_act->sa_flags = oact.sa_flags;
9450 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9451                 old_act->sa_restorer = oact.sa_restorer;
9452 #endif
9453                 unlock_user_struct(old_act, arg3, 1);
9454             }
9455 #endif
9456         }
9457         return ret;
9458 #endif
9459     case TARGET_NR_rt_sigaction:
9460         {
9461             /*
9462              * For Alpha and SPARC this is a 5 argument syscall, with
9463              * a 'restorer' parameter which must be copied into the
9464              * sa_restorer field of the sigaction struct.
9465              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9466              * and arg5 is the sigsetsize.
9467              */
9468 #if defined(TARGET_ALPHA)
9469             target_ulong sigsetsize = arg4;
9470             target_ulong restorer = arg5;
9471 #elif defined(TARGET_SPARC)
9472             target_ulong restorer = arg4;
9473             target_ulong sigsetsize = arg5;
9474 #else
9475             target_ulong sigsetsize = arg4;
9476             target_ulong restorer = 0;
9477 #endif
9478             struct target_sigaction *act = NULL;
9479             struct target_sigaction *oact = NULL;
9480 
9481             if (sigsetsize != sizeof(target_sigset_t)) {
9482                 return -TARGET_EINVAL;
9483             }
9484             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9485                 return -TARGET_EFAULT;
9486             }
9487             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9488                 ret = -TARGET_EFAULT;
9489             } else {
9490                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9491                 if (oact) {
9492                     unlock_user_struct(oact, arg3, 1);
9493                 }
9494             }
9495             if (act) {
9496                 unlock_user_struct(act, arg2, 0);
9497             }
9498         }
9499         return ret;
9500 #ifdef TARGET_NR_sgetmask /* not on alpha */
9501     case TARGET_NR_sgetmask:
9502         {
9503             sigset_t cur_set;
9504             abi_ulong target_set;
9505             ret = do_sigprocmask(0, NULL, &cur_set);
9506             if (!ret) {
9507                 host_to_target_old_sigset(&target_set, &cur_set);
9508                 ret = target_set;
9509             }
9510         }
9511         return ret;
9512 #endif
9513 #ifdef TARGET_NR_ssetmask /* not on alpha */
9514     case TARGET_NR_ssetmask:
9515         {
9516             sigset_t set, oset;
9517             abi_ulong target_set = arg1;
9518             target_to_host_old_sigset(&set, &target_set);
9519             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9520             if (!ret) {
9521                 host_to_target_old_sigset(&target_set, &oset);
9522                 ret = target_set;
9523             }
9524         }
9525         return ret;
9526 #endif
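    /*
     * sgetmask/ssetmask above are the ancient single-word signal mask calls;
     * only the first word of the signal mask is visible through them, hence
     * the "old_sigset" conversion helpers rather than the full sigset ones.
     */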
9527 #ifdef TARGET_NR_sigprocmask
9528     case TARGET_NR_sigprocmask:
9529         {
9530 #if defined(TARGET_ALPHA)
9531             sigset_t set, oldset;
9532             abi_ulong mask;
9533             int how;
9534 
9535             switch (arg1) {
9536             case TARGET_SIG_BLOCK:
9537                 how = SIG_BLOCK;
9538                 break;
9539             case TARGET_SIG_UNBLOCK:
9540                 how = SIG_UNBLOCK;
9541                 break;
9542             case TARGET_SIG_SETMASK:
9543                 how = SIG_SETMASK;
9544                 break;
9545             default:
9546                 return -TARGET_EINVAL;
9547             }
9548             mask = arg2;
9549             target_to_host_old_sigset(&set, &mask);
9550 
9551             ret = do_sigprocmask(how, &set, &oldset);
9552             if (!is_error(ret)) {
9553                 host_to_target_old_sigset(&mask, &oldset);
9554                 ret = mask;
9555                 cpu_env->ir[IR_V0] = 0; /* force no error */
9556             }
9557 #else
9558             sigset_t set, oldset, *set_ptr;
9559             int how;
9560 
9561             if (arg2) {
9562                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9563                 if (!p) {
9564                     return -TARGET_EFAULT;
9565                 }
9566                 target_to_host_old_sigset(&set, p);
9567                 unlock_user(p, arg2, 0);
9568                 set_ptr = &set;
9569                 switch (arg1) {
9570                 case TARGET_SIG_BLOCK:
9571                     how = SIG_BLOCK;
9572                     break;
9573                 case TARGET_SIG_UNBLOCK:
9574                     how = SIG_UNBLOCK;
9575                     break;
9576                 case TARGET_SIG_SETMASK:
9577                     how = SIG_SETMASK;
9578                     break;
9579                 default:
9580                     return -TARGET_EINVAL;
9581                 }
9582             } else {
9583                 how = 0;
9584                 set_ptr = NULL;
9585             }
9586             ret = do_sigprocmask(how, set_ptr, &oldset);
9587             if (!is_error(ret) && arg3) {
9588                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9589                     return -TARGET_EFAULT;
9590                 host_to_target_old_sigset(p, &oldset);
9591                 unlock_user(p, arg3, sizeof(target_sigset_t));
9592             }
9593 #endif
9594         }
9595         return ret;
9596 #endif
9597     case TARGET_NR_rt_sigprocmask:
9598         {
9599             int how = arg1;
9600             sigset_t set, oldset, *set_ptr;
9601 
9602             if (arg4 != sizeof(target_sigset_t)) {
9603                 return -TARGET_EINVAL;
9604             }
9605 
9606             if (arg2) {
9607                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9608                 if (!p) {
9609                     return -TARGET_EFAULT;
9610                 }
9611                 target_to_host_sigset(&set, p);
9612                 unlock_user(p, arg2, 0);
9613                 set_ptr = &set;
9614                 switch(how) {
9615                 case TARGET_SIG_BLOCK:
9616                     how = SIG_BLOCK;
9617                     break;
9618                 case TARGET_SIG_UNBLOCK:
9619                     how = SIG_UNBLOCK;
9620                     break;
9621                 case TARGET_SIG_SETMASK:
9622                     how = SIG_SETMASK;
9623                     break;
9624                 default:
9625                     return -TARGET_EINVAL;
9626                 }
9627             } else {
9628                 how = 0;
9629                 set_ptr = NULL;
9630             }
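            /*
             * With no new set supplied the kernel ignores "how", so a dummy
             * value of 0 is passed and only the old mask is reported.
             */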
9631             ret = do_sigprocmask(how, set_ptr, &oldset);
9632             if (!is_error(ret) && arg3) {
9633                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9634                     return -TARGET_EFAULT;
9635                 host_to_target_sigset(p, &oldset);
9636                 unlock_user(p, arg3, sizeof(target_sigset_t));
9637             }
9638         }
9639         return ret;
9640 #ifdef TARGET_NR_sigpending
9641     case TARGET_NR_sigpending:
9642         {
9643             sigset_t set;
9644             ret = get_errno(sigpending(&set));
9645             if (!is_error(ret)) {
9646                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9647                     return -TARGET_EFAULT;
9648                 host_to_target_old_sigset(p, &set);
9649                 unlock_user(p, arg1, sizeof(target_sigset_t));
9650             }
9651         }
9652         return ret;
9653 #endif
9654     case TARGET_NR_rt_sigpending:
9655         {
9656             sigset_t set;
9657 
9658             /* Yes, this check is >, not != like most. We follow the kernel's
9659              * logic here: NR_sigpending is implemented through the same code
9660              * path, and in that case the old_sigset_t it accepts is smaller
9661              * in size.
9662              */
9663             if (arg2 > sizeof(target_sigset_t)) {
9664                 return -TARGET_EINVAL;
9665             }
9666 
9667             ret = get_errno(sigpending(&set));
9668             if (!is_error(ret)) {
9669                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9670                     return -TARGET_EFAULT;
9671                 host_to_target_sigset(p, &set);
9672                 unlock_user(p, arg1, sizeof(target_sigset_t));
9673             }
9674         }
9675         return ret;
9676 #ifdef TARGET_NR_sigsuspend
9677     case TARGET_NR_sigsuspend:
9678         {
9679             sigset_t *set;
9680 
9681 #if defined(TARGET_ALPHA)
9682             TaskState *ts = cpu->opaque;
9683             /* target_to_host_old_sigset will bswap back */
9684             abi_ulong mask = tswapal(arg1);
9685             set = &ts->sigsuspend_mask;
9686             target_to_host_old_sigset(set, &mask);
9687 #else
9688             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9689             if (ret != 0) {
9690                 return ret;
9691             }
9692 #endif
9693             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9694             finish_sigsuspend_mask(ret);
9695         }
9696         return ret;
9697 #endif
9698     case TARGET_NR_rt_sigsuspend:
9699         {
9700             sigset_t *set;
9701 
9702             ret = process_sigsuspend_mask(&set, arg1, arg2);
9703             if (ret != 0) {
9704                 return ret;
9705             }
9706             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9707             finish_sigsuspend_mask(ret);
9708         }
9709         return ret;
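    /*
     * SIGSET_T_SIZE is the host *kernel* sigset size (8 bytes on Linux),
     * which is what safe_rt_sigsuspend()/safe_rt_sigtimedwait() must pass;
     * glibc's much larger sizeof(sigset_t) would be rejected with EINVAL.
     */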
9710 #ifdef TARGET_NR_rt_sigtimedwait
9711     case TARGET_NR_rt_sigtimedwait:
9712         {
9713             sigset_t set;
9714             struct timespec uts, *puts;
9715             siginfo_t uinfo;
9716 
9717             if (arg4 != sizeof(target_sigset_t)) {
9718                 return -TARGET_EINVAL;
9719             }
9720 
9721             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9722                 return -TARGET_EFAULT;
9723             target_to_host_sigset(&set, p);
9724             unlock_user(p, arg1, 0);
9725             if (arg3) {
9726                 puts = &uts;
9727                 if (target_to_host_timespec(puts, arg3)) {
9728                     return -TARGET_EFAULT;
9729                 }
9730             } else {
9731                 puts = NULL;
9732             }
9733             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9734                                                  SIGSET_T_SIZE));
9735             if (!is_error(ret)) {
9736                 if (arg2) {
9737                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9738                                   0);
9739                     if (!p) {
9740                         return -TARGET_EFAULT;
9741                     }
9742                     host_to_target_siginfo(p, &uinfo);
9743                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9744                 }
9745                 ret = host_to_target_signal(ret);
9746             }
9747         }
9748         return ret;
9749 #endif
9750 #ifdef TARGET_NR_rt_sigtimedwait_time64
9751     case TARGET_NR_rt_sigtimedwait_time64:
9752         {
9753             sigset_t set;
9754             struct timespec uts, *puts;
9755             siginfo_t uinfo;
9756 
9757             if (arg4 != sizeof(target_sigset_t)) {
9758                 return -TARGET_EINVAL;
9759             }
9760 
9761             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9762             if (!p) {
9763                 return -TARGET_EFAULT;
9764             }
9765             target_to_host_sigset(&set, p);
9766             unlock_user(p, arg1, 0);
9767             if (arg3) {
9768                 puts = &uts;
9769                 if (target_to_host_timespec64(puts, arg3)) {
9770                     return -TARGET_EFAULT;
9771                 }
9772             } else {
9773                 puts = NULL;
9774             }
9775             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9776                                                  SIGSET_T_SIZE));
9777             if (!is_error(ret)) {
9778                 if (arg2) {
9779                     p = lock_user(VERIFY_WRITE, arg2,
9780                                   sizeof(target_siginfo_t), 0);
9781                     if (!p) {
9782                         return -TARGET_EFAULT;
9783                     }
9784                     host_to_target_siginfo(p, &uinfo);
9785                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9786                 }
9787                 ret = host_to_target_signal(ret);
9788             }
9789         }
9790         return ret;
9791 #endif
9792     case TARGET_NR_rt_sigqueueinfo:
9793         {
9794             siginfo_t uinfo;
9795 
9796             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9797             if (!p) {
9798                 return -TARGET_EFAULT;
9799             }
9800             target_to_host_siginfo(&uinfo, p);
9801             unlock_user(p, arg3, 0);
9802             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9803         }
9804         return ret;
9805     case TARGET_NR_rt_tgsigqueueinfo:
9806         {
9807             siginfo_t uinfo;
9808 
9809             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9810             if (!p) {
9811                 return -TARGET_EFAULT;
9812             }
9813             target_to_host_siginfo(&uinfo, p);
9814             unlock_user(p, arg4, 0);
9815             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9816         }
9817         return ret;
9818 #ifdef TARGET_NR_sigreturn
9819     case TARGET_NR_sigreturn:
9820         if (block_signals()) {
9821             return -QEMU_ERESTARTSYS;
9822         }
9823         return do_sigreturn(cpu_env);
9824 #endif
9825     case TARGET_NR_rt_sigreturn:
9826         if (block_signals()) {
9827             return -QEMU_ERESTARTSYS;
9828         }
9829         return do_rt_sigreturn(cpu_env);
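    /*
     * block_signals() returns true if a guest signal is already pending, in
     * which case -QEMU_ERESTARTSYS makes the main loop deliver it and then
     * restart this syscall; the sigreturn frame is only unwound once host
     * signals are safely blocked.
     */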
9830     case TARGET_NR_sethostname:
9831         if (!(p = lock_user_string(arg1)))
9832             return -TARGET_EFAULT;
9833         ret = get_errno(sethostname(p, arg2));
9834         unlock_user(p, arg1, 0);
9835         return ret;
9836 #ifdef TARGET_NR_setrlimit
9837     case TARGET_NR_setrlimit:
9838         {
9839             int resource = target_to_host_resource(arg1);
9840             struct target_rlimit *target_rlim;
9841             struct rlimit rlim;
9842             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9843                 return -TARGET_EFAULT;
9844             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9845             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9846             unlock_user_struct(target_rlim, arg2, 0);
9847             /*
9848              * If we just passed through resource limit settings for memory then
9849              * they would also apply to QEMU's own allocations, and QEMU will
9850              * crash or hang or die if its allocations fail. Ideally we would
9851              * track the guest allocations in QEMU and apply the limits ourselves.
9852              * For now, just tell the guest the call succeeded but don't actually
9853              * limit anything.
9854              */
9855             if (resource != RLIMIT_AS &&
9856                 resource != RLIMIT_DATA &&
9857                 resource != RLIMIT_STACK) {
9858                 return get_errno(setrlimit(resource, &rlim));
9859             } else {
9860                 return 0;
9861             }
9862         }
9863 #endif
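    /*
     * Illustrative guest-side consequence of the stub above (hypothetical
     * example, not part of QEMU): under qemu-user this appears to succeed,
     * but no address-space limit is actually applied.
     *
     *     struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
     *     setrlimit(RLIMIT_AS, &rl);   // returns 0, but has no real effect
     */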
9864 #ifdef TARGET_NR_getrlimit
9865     case TARGET_NR_getrlimit:
9866         {
9867             int resource = target_to_host_resource(arg1);
9868             struct target_rlimit *target_rlim;
9869             struct rlimit rlim;
9870 
9871             ret = get_errno(getrlimit(resource, &rlim));
9872             if (!is_error(ret)) {
9873                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9874                     return -TARGET_EFAULT;
9875                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9876                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9877                 unlock_user_struct(target_rlim, arg2, 1);
9878             }
9879         }
9880         return ret;
9881 #endif
9882     case TARGET_NR_getrusage:
9883         {
9884             struct rusage rusage;
9885             ret = get_errno(getrusage(arg1, &rusage));
9886             if (!is_error(ret)) {
9887                 ret = host_to_target_rusage(arg2, &rusage);
9888             }
9889         }
9890         return ret;
9891 #if defined(TARGET_NR_gettimeofday)
9892     case TARGET_NR_gettimeofday:
9893         {
9894             struct timeval tv;
9895             struct timezone tz;
9896 
9897             ret = get_errno(gettimeofday(&tv, &tz));
9898             if (!is_error(ret)) {
9899                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9900                     return -TARGET_EFAULT;
9901                 }
9902                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9903                     return -TARGET_EFAULT;
9904                 }
9905             }
9906         }
9907         return ret;
9908 #endif
9909 #if defined(TARGET_NR_settimeofday)
9910     case TARGET_NR_settimeofday:
9911         {
9912             struct timeval tv, *ptv = NULL;
9913             struct timezone tz, *ptz = NULL;
9914 
9915             if (arg1) {
9916                 if (copy_from_user_timeval(&tv, arg1)) {
9917                     return -TARGET_EFAULT;
9918                 }
9919                 ptv = &tv;
9920             }
9921 
9922             if (arg2) {
9923                 if (copy_from_user_timezone(&tz, arg2)) {
9924                     return -TARGET_EFAULT;
9925                 }
9926                 ptz = &tz;
9927             }
9928 
9929             return get_errno(settimeofday(ptv, ptz));
9930         }
9931 #endif
9932 #if defined(TARGET_NR_select)
9933     case TARGET_NR_select:
9934 #if defined(TARGET_WANT_NI_OLD_SELECT)
9935         /* some architectures used to implement old_select here
9936          * but now return ENOSYS for it.
9937          */
9938         ret = -TARGET_ENOSYS;
9939 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9940         ret = do_old_select(arg1);
9941 #else
9942         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9943 #endif
9944         return ret;
9945 #endif
9946 #ifdef TARGET_NR_pselect6
9947     case TARGET_NR_pselect6:
9948         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9949 #endif
9950 #ifdef TARGET_NR_pselect6_time64
9951     case TARGET_NR_pselect6_time64:
9952         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9953 #endif
9954 #ifdef TARGET_NR_symlink
9955     case TARGET_NR_symlink:
9956         {
9957             void *p2;
9958             p = lock_user_string(arg1);
9959             p2 = lock_user_string(arg2);
9960             if (!p || !p2)
9961                 ret = -TARGET_EFAULT;
9962             else
9963                 ret = get_errno(symlink(p, p2));
9964             unlock_user(p2, arg2, 0);
9965             unlock_user(p, arg1, 0);
9966         }
9967         return ret;
9968 #endif
9969 #if defined(TARGET_NR_symlinkat)
9970     case TARGET_NR_symlinkat:
9971         {
9972             void *p2;
9973             p  = lock_user_string(arg1);
9974             p2 = lock_user_string(arg3);
9975             if (!p || !p2)
9976                 ret = -TARGET_EFAULT;
9977             else
9978                 ret = get_errno(symlinkat(p, arg2, p2));
9979             unlock_user(p2, arg3, 0);
9980             unlock_user(p, arg1, 0);
9981         }
9982         return ret;
9983 #endif
9984 #ifdef TARGET_NR_readlink
9985     case TARGET_NR_readlink:
9986         {
9987             void *p2;
9988             p = lock_user_string(arg1);
9989             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9990             if (!p || !p2) {
9991                 ret = -TARGET_EFAULT;
9992             } else if (!arg3) {
9993                 /* Short circuit this for the magic exe check. */
9994                 ret = -TARGET_EINVAL;
9995             } else if (is_proc_myself((const char *)p, "exe")) {
9996                 /*
9997                  * Don't worry about sign mismatch as earlier mapping
9998                  * logic would have thrown a bad address error.
9999                  */
10000                 ret = MIN(strlen(exec_path), arg3);
10001                 /* We cannot NUL terminate the string. */
10002                 memcpy(p2, exec_path, ret);
10003             } else {
10004                 ret = get_errno(readlink(path(p), p2, arg3));
10005             }
10006             unlock_user(p2, arg2, ret);
10007             unlock_user(p, arg1, 0);
10008         }
10009         return ret;
10010 #endif
10011 #if defined(TARGET_NR_readlinkat)
10012     case TARGET_NR_readlinkat:
10013         {
10014             void *p2;
10015             p  = lock_user_string(arg2);
10016             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10017             if (!p || !p2) {
10018                 ret = -TARGET_EFAULT;
10019             } else if (!arg4) {
10020                 /* Short circuit this for the magic exe check. */
10021                 ret = -TARGET_EINVAL;
10022             } else if (is_proc_myself((const char *)p, "exe")) {
10023                 /*
10024                  * Don't worry about sign mismatch as earlier mapping
10025                  * logic would have thrown a bad address error.
10026                  */
10027                 ret = MIN(strlen(exec_path), arg4);
10028                 /* We cannot NUL terminate the string. */
10029                 memcpy(p2, exec_path, ret);
10030             } else {
10031                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10032             }
10033             unlock_user(p2, arg3, ret);
10034             unlock_user(p, arg2, 0);
10035         }
10036         return ret;
10037 #endif
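    /*
     * is_proc_myself() intercepts readlink()/readlinkat() on /proc/self/exe
     * (and /proc/<own-pid>/exe) and returns exec_path, the guest binary
     * being emulated, instead of the path of the QEMU executable that the
     * host kernel would report.
     */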
10038 #ifdef TARGET_NR_swapon
10039     case TARGET_NR_swapon:
10040         if (!(p = lock_user_string(arg1)))
10041             return -TARGET_EFAULT;
10042         ret = get_errno(swapon(p, arg2));
10043         unlock_user(p, arg1, 0);
10044         return ret;
10045 #endif
10046     case TARGET_NR_reboot:
10047         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10048             /* arg4 must be ignored in all other cases */
10049             p = lock_user_string(arg4);
10050             if (!p) {
10051                 return -TARGET_EFAULT;
10052             }
10053             ret = get_errno(reboot(arg1, arg2, arg3, p));
10054             unlock_user(p, arg4, 0);
10055         } else {
10056             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10057         }
10058         return ret;
10059 #ifdef TARGET_NR_mmap
10060     case TARGET_NR_mmap:
10061 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10062     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10063     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10064     || defined(TARGET_S390X)
10065         {
10066             abi_ulong *v;
10067             abi_ulong v1, v2, v3, v4, v5, v6;
10068             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10069                 return -TARGET_EFAULT;
10070             v1 = tswapal(v[0]);
10071             v2 = tswapal(v[1]);
10072             v3 = tswapal(v[2]);
10073             v4 = tswapal(v[3]);
10074             v5 = tswapal(v[4]);
10075             v6 = tswapal(v[5]);
10076             unlock_user(v, arg1, 0);
10077             ret = get_errno(target_mmap(v1, v2, v3,
10078                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10079                                         v5, v6));
10080         }
10081 #else
10082         /* mmap pointers are always untagged */
10083         ret = get_errno(target_mmap(arg1, arg2, arg3,
10084                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10085                                     arg5,
10086                                     arg6));
10087 #endif
10088         return ret;
10089 #endif
10090 #ifdef TARGET_NR_mmap2
10091     case TARGET_NR_mmap2:
10092 #ifndef MMAP_SHIFT
10093 #define MMAP_SHIFT 12
10094 #endif
10095         ret = target_mmap(arg1, arg2, arg3,
10096                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10097                           arg5, arg6 << MMAP_SHIFT);
10098         return get_errno(ret);
10099 #endif
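    /*
     * mmap2 expresses the file offset in units of 1 << MMAP_SHIFT
     * (4096-byte pages unless the target overrides it), hence the shift
     * back to a byte offset above.
     */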
10100     case TARGET_NR_munmap:
10101         arg1 = cpu_untagged_addr(cpu, arg1);
10102         return get_errno(target_munmap(arg1, arg2));
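    /*
     * PROT_GROWSDOWN asks for the protection change to be extended down to
     * the start of a grows-down (stack) mapping.  The mprotect case below
     * approximates this by widening the range to the recorded stack limit
     * and dropping the flag, which is the pattern glibc uses when it makes
     * the stack executable.
     */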
10103     case TARGET_NR_mprotect:
10104         arg1 = cpu_untagged_addr(cpu, arg1);
10105         {
10106             TaskState *ts = cpu->opaque;
10107             /* Special hack to detect libc making the stack executable.  */
10108             if ((arg3 & PROT_GROWSDOWN)
10109                 && arg1 >= ts->info->stack_limit
10110                 && arg1 <= ts->info->start_stack) {
10111                 arg3 &= ~PROT_GROWSDOWN;
10112                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10113                 arg1 = ts->info->stack_limit;
10114             }
10115         }
10116         return get_errno(target_mprotect(arg1, arg2, arg3));
10117 #ifdef TARGET_NR_mremap
10118     case TARGET_NR_mremap:
10119         arg1 = cpu_untagged_addr(cpu, arg1);
10120         /* mremap new_addr (arg5) is always untagged */
10121         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10122 #endif
10123         /* ??? msync/mlock/munlock are broken for softmmu.  */
10124 #ifdef TARGET_NR_msync
10125     case TARGET_NR_msync:
10126         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10127 #endif
10128 #ifdef TARGET_NR_mlock
10129     case TARGET_NR_mlock:
10130         return get_errno(mlock(g2h(cpu, arg1), arg2));
10131 #endif
10132 #ifdef TARGET_NR_munlock
10133     case TARGET_NR_munlock:
10134         return get_errno(munlock(g2h(cpu, arg1), arg2));
10135 #endif
10136 #ifdef TARGET_NR_mlockall
10137     case TARGET_NR_mlockall:
10138         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10139 #endif
10140 #ifdef TARGET_NR_munlockall
10141     case TARGET_NR_munlockall:
10142         return get_errno(munlockall());
10143 #endif
10144 #ifdef TARGET_NR_truncate
10145     case TARGET_NR_truncate:
10146         if (!(p = lock_user_string(arg1)))
10147             return -TARGET_EFAULT;
10148         ret = get_errno(truncate(p, arg2));
10149         unlock_user(p, arg1, 0);
10150         return ret;
10151 #endif
10152 #ifdef TARGET_NR_ftruncate
10153     case TARGET_NR_ftruncate:
10154         return get_errno(ftruncate(arg1, arg2));
10155 #endif
10156     case TARGET_NR_fchmod:
10157         return get_errno(fchmod(arg1, arg2));
10158 #if defined(TARGET_NR_fchmodat)
10159     case TARGET_NR_fchmodat:
10160         if (!(p = lock_user_string(arg2)))
10161             return -TARGET_EFAULT;
10162         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10163         unlock_user(p, arg2, 0);
10164         return ret;
10165 #endif
10166     case TARGET_NR_getpriority:
10167         /* Note that negative values are valid for getpriority, so we must
10168            differentiate based on errno settings.  */
10169         errno = 0;
10170         ret = getpriority(arg1, arg2);
10171         if (ret == -1 && errno != 0) {
10172             return -host_to_target_errno(errno);
10173         }
10174 #ifdef TARGET_ALPHA
10175         /* Return value is the unbiased priority.  Signal no error.  */
10176         cpu_env->ir[IR_V0] = 0;
10177 #else
10178         /* Return value is a biased priority to avoid negative numbers.  */
10179         ret = 20 - ret;
10180 #endif
10181         return ret;
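    /*
     * The kernel's getpriority returns 20 - nice so the result is never
     * negative and cannot be mistaken for an error; that bias is replicated
     * above, except on Alpha where the unbiased value is returned and
     * success is flagged separately.
     */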
10182     case TARGET_NR_setpriority:
10183         return get_errno(setpriority(arg1, arg2, arg3));
10184 #ifdef TARGET_NR_statfs
10185     case TARGET_NR_statfs:
10186         if (!(p = lock_user_string(arg1))) {
10187             return -TARGET_EFAULT;
10188         }
10189         ret = get_errno(statfs(path(p), &stfs));
10190         unlock_user(p, arg1, 0);
10191     convert_statfs:
10192         if (!is_error(ret)) {
10193             struct target_statfs *target_stfs;
10194 
10195             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10196                 return -TARGET_EFAULT;
10197             __put_user(stfs.f_type, &target_stfs->f_type);
10198             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10199             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10200             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10201             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10202             __put_user(stfs.f_files, &target_stfs->f_files);
10203             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10204             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10205             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10206             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10207             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10208 #ifdef _STATFS_F_FLAGS
10209             __put_user(stfs.f_flags, &target_stfs->f_flags);
10210 #else
10211             __put_user(0, &target_stfs->f_flags);
10212 #endif
10213             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10214             unlock_user_struct(target_stfs, arg2, 1);
10215         }
10216         return ret;
10217 #endif
10218 #ifdef TARGET_NR_fstatfs
10219     case TARGET_NR_fstatfs:
10220         ret = get_errno(fstatfs(arg1, &stfs));
10221         goto convert_statfs;
10222 #endif
10223 #ifdef TARGET_NR_statfs64
10224     case TARGET_NR_statfs64:
10225         if (!(p = lock_user_string(arg1))) {
10226             return -TARGET_EFAULT;
10227         }
10228         ret = get_errno(statfs(path(p), &stfs));
10229         unlock_user(p, arg1, 0);
10230     convert_statfs64:
10231         if (!is_error(ret)) {
10232             struct target_statfs64 *target_stfs;
10233 
10234             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10235                 return -TARGET_EFAULT;
10236             __put_user(stfs.f_type, &target_stfs->f_type);
10237             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10238             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10239             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10240             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10241             __put_user(stfs.f_files, &target_stfs->f_files);
10242             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10243             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10244             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10245             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10246             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10247 #ifdef _STATFS_F_FLAGS
10248             __put_user(stfs.f_flags, &target_stfs->f_flags);
10249 #else
10250             __put_user(0, &target_stfs->f_flags);
10251 #endif
10252             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10253             unlock_user_struct(target_stfs, arg3, 1);
10254         }
10255         return ret;
10256     case TARGET_NR_fstatfs64:
10257         ret = get_errno(fstatfs(arg1, &stfs));
10258         goto convert_statfs64;
10259 #endif
10260 #ifdef TARGET_NR_socketcall
10261     case TARGET_NR_socketcall:
10262         return do_socketcall(arg1, arg2);
10263 #endif
10264 #ifdef TARGET_NR_accept
10265     case TARGET_NR_accept:
10266         return do_accept4(arg1, arg2, arg3, 0);
10267 #endif
10268 #ifdef TARGET_NR_accept4
10269     case TARGET_NR_accept4:
10270         return do_accept4(arg1, arg2, arg3, arg4);
10271 #endif
10272 #ifdef TARGET_NR_bind
10273     case TARGET_NR_bind:
10274         return do_bind(arg1, arg2, arg3);
10275 #endif
10276 #ifdef TARGET_NR_connect
10277     case TARGET_NR_connect:
10278         return do_connect(arg1, arg2, arg3);
10279 #endif
10280 #ifdef TARGET_NR_getpeername
10281     case TARGET_NR_getpeername:
10282         return do_getpeername(arg1, arg2, arg3);
10283 #endif
10284 #ifdef TARGET_NR_getsockname
10285     case TARGET_NR_getsockname:
10286         return do_getsockname(arg1, arg2, arg3);
10287 #endif
10288 #ifdef TARGET_NR_getsockopt
10289     case TARGET_NR_getsockopt:
10290         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10291 #endif
10292 #ifdef TARGET_NR_listen
10293     case TARGET_NR_listen:
10294         return get_errno(listen(arg1, arg2));
10295 #endif
10296 #ifdef TARGET_NR_recv
10297     case TARGET_NR_recv:
10298         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10299 #endif
10300 #ifdef TARGET_NR_recvfrom
10301     case TARGET_NR_recvfrom:
10302         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10303 #endif
10304 #ifdef TARGET_NR_recvmsg
10305     case TARGET_NR_recvmsg:
10306         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10307 #endif
10308 #ifdef TARGET_NR_send
10309     case TARGET_NR_send:
10310         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10311 #endif
10312 #ifdef TARGET_NR_sendmsg
10313     case TARGET_NR_sendmsg:
10314         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10315 #endif
10316 #ifdef TARGET_NR_sendmmsg
10317     case TARGET_NR_sendmmsg:
10318         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10319 #endif
10320 #ifdef TARGET_NR_recvmmsg
10321     case TARGET_NR_recvmmsg:
10322         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10323 #endif
10324 #ifdef TARGET_NR_sendto
10325     case TARGET_NR_sendto:
10326         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10327 #endif
10328 #ifdef TARGET_NR_shutdown
10329     case TARGET_NR_shutdown:
10330         return get_errno(shutdown(arg1, arg2));
10331 #endif
10332 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10333     case TARGET_NR_getrandom:
10334         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10335         if (!p) {
10336             return -TARGET_EFAULT;
10337         }
10338         ret = get_errno(getrandom(p, arg2, arg3));
10339         unlock_user(p, arg1, ret);
10340         return ret;
10341 #endif
10342 #ifdef TARGET_NR_socket
10343     case TARGET_NR_socket:
10344         return do_socket(arg1, arg2, arg3);
10345 #endif
10346 #ifdef TARGET_NR_socketpair
10347     case TARGET_NR_socketpair:
10348         return do_socketpair(arg1, arg2, arg3, arg4);
10349 #endif
10350 #ifdef TARGET_NR_setsockopt
10351     case TARGET_NR_setsockopt:
10352         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10353 #endif
10354 #if defined(TARGET_NR_syslog)
10355     case TARGET_NR_syslog:
10356         {
10357             int len = arg3;
10358 
10359             switch (arg1) {
10360             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10361             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10362             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10363             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10364             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10365             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10366             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10367             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10368                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10369             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10370             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10371             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10372                 {
10373                     if (len < 0) {
10374                         return -TARGET_EINVAL;
10375                     }
10376                     if (len == 0) {
10377                         return 0;
10378                     }
10379                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10380                     if (!p) {
10381                         return -TARGET_EFAULT;
10382                     }
10383                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10384                     unlock_user(p, arg2, arg3);
10385                 }
10386                 return ret;
10387             default:
10388                 return -TARGET_EINVAL;
10389             }
10390         }
10391         break;
10392 #endif
10393     case TARGET_NR_setitimer:
10394         {
10395             struct itimerval value, ovalue, *pvalue;
10396 
10397             if (arg2) {
10398                 pvalue = &value;
10399                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10400                     || copy_from_user_timeval(&pvalue->it_value,
10401                                               arg2 + sizeof(struct target_timeval)))
10402                     return -TARGET_EFAULT;
10403             } else {
10404                 pvalue = NULL;
10405             }
10406             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10407             if (!is_error(ret) && arg3) {
10408                 if (copy_to_user_timeval(arg3,
10409                                          &ovalue.it_interval)
10410                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10411                                             &ovalue.it_value))
10412                     return -TARGET_EFAULT;
10413             }
10414         }
10415         return ret;
10416     case TARGET_NR_getitimer:
10417         {
10418             struct itimerval value;
10419 
10420             ret = get_errno(getitimer(arg1, &value));
10421             if (!is_error(ret) && arg2) {
10422                 if (copy_to_user_timeval(arg2,
10423                                          &value.it_interval)
10424                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10425                                             &value.it_value))
10426                     return -TARGET_EFAULT;
10427             }
10428         }
10429         return ret;
10430 #ifdef TARGET_NR_stat
10431     case TARGET_NR_stat:
10432         if (!(p = lock_user_string(arg1))) {
10433             return -TARGET_EFAULT;
10434         }
10435         ret = get_errno(stat(path(p), &st));
10436         unlock_user(p, arg1, 0);
10437         goto do_stat;
10438 #endif
10439 #ifdef TARGET_NR_lstat
10440     case TARGET_NR_lstat:
10441         if (!(p = lock_user_string(arg1))) {
10442             return -TARGET_EFAULT;
10443         }
10444         ret = get_errno(lstat(path(p), &st));
10445         unlock_user(p, arg1, 0);
10446         goto do_stat;
10447 #endif
10448 #ifdef TARGET_NR_fstat
10449     case TARGET_NR_fstat:
10450         {
10451             ret = get_errno(fstat(arg1, &st));
10452 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10453         do_stat:
10454 #endif
10455             if (!is_error(ret)) {
10456                 struct target_stat *target_st;
10457 
10458                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10459                     return -TARGET_EFAULT;
10460                 memset(target_st, 0, sizeof(*target_st));
10461                 __put_user(st.st_dev, &target_st->st_dev);
10462                 __put_user(st.st_ino, &target_st->st_ino);
10463                 __put_user(st.st_mode, &target_st->st_mode);
10464                 __put_user(st.st_uid, &target_st->st_uid);
10465                 __put_user(st.st_gid, &target_st->st_gid);
10466                 __put_user(st.st_nlink, &target_st->st_nlink);
10467                 __put_user(st.st_rdev, &target_st->st_rdev);
10468                 __put_user(st.st_size, &target_st->st_size);
10469                 __put_user(st.st_blksize, &target_st->st_blksize);
10470                 __put_user(st.st_blocks, &target_st->st_blocks);
10471                 __put_user(st.st_atime, &target_st->target_st_atime);
10472                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10473                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10474 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10475                 __put_user(st.st_atim.tv_nsec,
10476                            &target_st->target_st_atime_nsec);
10477                 __put_user(st.st_mtim.tv_nsec,
10478                            &target_st->target_st_mtime_nsec);
10479                 __put_user(st.st_ctim.tv_nsec,
10480                            &target_st->target_st_ctime_nsec);
10481 #endif
10482                 unlock_user_struct(target_st, arg2, 1);
10483             }
10484         }
10485         return ret;
10486 #endif
10487     case TARGET_NR_vhangup:
10488         return get_errno(vhangup());
10489 #ifdef TARGET_NR_syscall
10490     case TARGET_NR_syscall:
10491         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10492                           arg6, arg7, arg8, 0);
10493 #endif
10494 #if defined(TARGET_NR_wait4)
10495     case TARGET_NR_wait4:
10496         {
10497             int status;
10498             abi_long status_ptr = arg2;
10499             struct rusage rusage, *rusage_ptr;
10500             abi_ulong target_rusage = arg4;
10501             abi_long rusage_err;
10502             if (target_rusage)
10503                 rusage_ptr = &rusage;
10504             else
10505                 rusage_ptr = NULL;
10506             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10507             if (!is_error(ret)) {
10508                 if (status_ptr && ret) {
10509                     status = host_to_target_waitstatus(status);
10510                     if (put_user_s32(status, status_ptr))
10511                         return -TARGET_EFAULT;
10512                 }
10513                 if (target_rusage) {
10514                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10515                     if (rusage_err) {
10516                         ret = rusage_err;
10517                     }
10518                 }
10519             }
10520         }
10521         return ret;
10522 #endif
10523 #ifdef TARGET_NR_swapoff
10524     case TARGET_NR_swapoff:
10525         if (!(p = lock_user_string(arg1)))
10526             return -TARGET_EFAULT;
10527         ret = get_errno(swapoff(p));
10528         unlock_user(p, arg1, 0);
10529         return ret;
10530 #endif
10531     case TARGET_NR_sysinfo:
10532         {
10533             struct target_sysinfo *target_value;
10534             struct sysinfo value;
10535             ret = get_errno(sysinfo(&value));
10536             if (!is_error(ret) && arg1)
10537             {
10538                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10539                     return -TARGET_EFAULT;
10540                 __put_user(value.uptime, &target_value->uptime);
10541                 __put_user(value.loads[0], &target_value->loads[0]);
10542                 __put_user(value.loads[1], &target_value->loads[1]);
10543                 __put_user(value.loads[2], &target_value->loads[2]);
10544                 __put_user(value.totalram, &target_value->totalram);
10545                 __put_user(value.freeram, &target_value->freeram);
10546                 __put_user(value.sharedram, &target_value->sharedram);
10547                 __put_user(value.bufferram, &target_value->bufferram);
10548                 __put_user(value.totalswap, &target_value->totalswap);
10549                 __put_user(value.freeswap, &target_value->freeswap);
10550                 __put_user(value.procs, &target_value->procs);
10551                 __put_user(value.totalhigh, &target_value->totalhigh);
10552                 __put_user(value.freehigh, &target_value->freehigh);
10553                 __put_user(value.mem_unit, &target_value->mem_unit);
10554                 unlock_user_struct(target_value, arg1, 1);
10555             }
10556         }
10557         return ret;
10558 #ifdef TARGET_NR_ipc
10559     case TARGET_NR_ipc:
10560         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10561 #endif
10562 #ifdef TARGET_NR_semget
10563     case TARGET_NR_semget:
10564         return get_errno(semget(arg1, arg2, arg3));
10565 #endif
10566 #ifdef TARGET_NR_semop
10567     case TARGET_NR_semop:
10568         return do_semtimedop(arg1, arg2, arg3, 0, false);
10569 #endif
10570 #ifdef TARGET_NR_semtimedop
10571     case TARGET_NR_semtimedop:
10572         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10573 #endif
10574 #ifdef TARGET_NR_semtimedop_time64
10575     case TARGET_NR_semtimedop_time64:
10576         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10577 #endif
10578 #ifdef TARGET_NR_semctl
10579     case TARGET_NR_semctl:
10580         return do_semctl(arg1, arg2, arg3, arg4);
10581 #endif
10582 #ifdef TARGET_NR_msgctl
10583     case TARGET_NR_msgctl:
10584         return do_msgctl(arg1, arg2, arg3);
10585 #endif
10586 #ifdef TARGET_NR_msgget
10587     case TARGET_NR_msgget:
10588         return get_errno(msgget(arg1, arg2));
10589 #endif
10590 #ifdef TARGET_NR_msgrcv
10591     case TARGET_NR_msgrcv:
10592         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10593 #endif
10594 #ifdef TARGET_NR_msgsnd
10595     case TARGET_NR_msgsnd:
10596         return do_msgsnd(arg1, arg2, arg3, arg4);
10597 #endif
10598 #ifdef TARGET_NR_shmget
10599     case TARGET_NR_shmget:
10600         return get_errno(shmget(arg1, arg2, arg3));
10601 #endif
10602 #ifdef TARGET_NR_shmctl
10603     case TARGET_NR_shmctl:
10604         return do_shmctl(arg1, arg2, arg3);
10605 #endif
10606 #ifdef TARGET_NR_shmat
10607     case TARGET_NR_shmat:
10608         return do_shmat(cpu_env, arg1, arg2, arg3);
10609 #endif
10610 #ifdef TARGET_NR_shmdt
10611     case TARGET_NR_shmdt:
10612         return do_shmdt(arg1);
10613 #endif
10614     case TARGET_NR_fsync:
10615         return get_errno(fsync(arg1));
10616     case TARGET_NR_clone:
10617         /* Linux manages to have three different orderings for its
10618          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10619          * match the kernel's CONFIG_CLONE_* settings.
10620          * Microblaze is further special in that it uses a sixth
10621          * implicit argument to clone for the TLS pointer.
10622          */
10623 #if defined(TARGET_MICROBLAZE)
10624         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10625 #elif defined(TARGET_CLONE_BACKWARDS)
10626         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10627 #elif defined(TARGET_CLONE_BACKWARDS2)
10628         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10629 #else
10630         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10631 #endif
10632         return ret;
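    /*
     * The #if ladder above reorders the guest registers so that do_fork()
     * always sees (flags, newsp, parent_tidptr, newtls, child_tidptr),
     * whichever of the kernel's clone() argument conventions the target
     * uses.
     */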
10633 #ifdef __NR_exit_group
10634         /* new thread calls */
10635     case TARGET_NR_exit_group:
10636         preexit_cleanup(cpu_env, arg1);
10637         return get_errno(exit_group(arg1));
10638 #endif
10639     case TARGET_NR_setdomainname:
10640         if (!(p = lock_user_string(arg1)))
10641             return -TARGET_EFAULT;
10642         ret = get_errno(setdomainname(p, arg2));
10643         unlock_user(p, arg1, 0);
10644         return ret;
10645     case TARGET_NR_uname:
10646         /* no need to transcode because we use the Linux syscall */
10647         {
10648             struct new_utsname * buf;
10649 
10650             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10651                 return -TARGET_EFAULT;
10652             ret = get_errno(sys_uname(buf));
10653             if (!is_error(ret)) {
10654                 /* Overwrite the native machine name with whatever is being
10655                    emulated. */
10656                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10657                           sizeof(buf->machine));
10658                 /* Allow the user to override the reported release.  */
10659                 if (qemu_uname_release && *qemu_uname_release) {
10660                     g_strlcpy(buf->release, qemu_uname_release,
10661                               sizeof(buf->release));
10662                 }
10663             }
10664             unlock_user_struct(buf, arg1, 1);
10665         }
10666         return ret;
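    /*
     * qemu_uname_release comes from the -r command line option (QEMU_UNAME
     * in the environment) and lets the reported kernel release be
     * overridden for guests that check uname() at startup.
     */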
10667 #ifdef TARGET_I386
10668     case TARGET_NR_modify_ldt:
10669         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10670 #if !defined(TARGET_X86_64)
10671     case TARGET_NR_vm86:
10672         return do_vm86(cpu_env, arg1, arg2);
10673 #endif
10674 #endif
10675 #if defined(TARGET_NR_adjtimex)
10676     case TARGET_NR_adjtimex:
10677         {
10678             struct timex host_buf;
10679 
10680             if (target_to_host_timex(&host_buf, arg1) != 0) {
10681                 return -TARGET_EFAULT;
10682             }
10683             ret = get_errno(adjtimex(&host_buf));
10684             if (!is_error(ret)) {
10685                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10686                     return -TARGET_EFAULT;
10687                 }
10688             }
10689         }
10690         return ret;
10691 #endif
10692 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10693     case TARGET_NR_clock_adjtime:
10694         {
10695             struct timex htx, *phtx = &htx;
10696 
10697             if (target_to_host_timex(phtx, arg2) != 0) {
10698                 return -TARGET_EFAULT;
10699             }
10700             ret = get_errno(clock_adjtime(arg1, phtx));
10701             if (!is_error(ret) && phtx) {
10702                 if (host_to_target_timex(arg2, phtx) != 0) {
10703                     return -TARGET_EFAULT;
10704                 }
10705             }
10706         }
10707         return ret;
10708 #endif
10709 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10710     case TARGET_NR_clock_adjtime64:
10711         {
10712             struct timex htx;
10713 
10714             if (target_to_host_timex64(&htx, arg2) != 0) {
10715                 return -TARGET_EFAULT;
10716             }
10717             ret = get_errno(clock_adjtime(arg1, &htx));
10718             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10719                     return -TARGET_EFAULT;
10720             }
10721         }
10722         return ret;
10723 #endif
10724     case TARGET_NR_getpgid:
10725         return get_errno(getpgid(arg1));
10726     case TARGET_NR_fchdir:
10727         return get_errno(fchdir(arg1));
10728     case TARGET_NR_personality:
10729         return get_errno(personality(arg1));
10730 #ifdef TARGET_NR__llseek /* Not on alpha */
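    /*
     * _llseek: the 64-bit offset arrives as two 32-bit halves (high word
     * in arg2, low word in arg3).  Hosts without __NR_llseek fall back to
     * a plain lseek() on the combined offset; in both cases the 64-bit
     * result is written back to the guest pointer in arg4.
     */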
10731     case TARGET_NR__llseek:
10732         {
10733             int64_t res;
10734 #if !defined(__NR_llseek)
10735             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10736             if (res == -1) {
10737                 ret = get_errno(res);
10738             } else {
10739                 ret = 0;
10740             }
10741 #else
10742             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10743 #endif
10744             if ((ret == 0) && put_user_s64(res, arg4)) {
10745                 return -TARGET_EFAULT;
10746             }
10747         }
10748         return ret;
10749 #endif
10750 #ifdef TARGET_NR_getdents
10751     case TARGET_NR_getdents:
10752         return do_getdents(arg1, arg2, arg3);
10753 #endif /* TARGET_NR_getdents */
10754 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10755     case TARGET_NR_getdents64:
10756         return do_getdents64(arg1, arg2, arg3);
10757 #endif /* TARGET_NR_getdents64 */
10758 #if defined(TARGET_NR__newselect)
10759     case TARGET_NR__newselect:
10760         return do_select(arg1, arg2, arg3, arg4, arg5);
10761 #endif
10762 #ifdef TARGET_NR_poll
10763     case TARGET_NR_poll:
10764         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10765 #endif
10766 #ifdef TARGET_NR_ppoll
10767     case TARGET_NR_ppoll:
10768         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10769 #endif
10770 #ifdef TARGET_NR_ppoll_time64
10771     case TARGET_NR_ppoll_time64:
10772         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10773 #endif
10774     case TARGET_NR_flock:
10775         /* NOTE: the flock constant seems to be the same for every
10776            Linux platform */
10777         return get_errno(safe_flock(arg1, arg2));
10778     case TARGET_NR_readv:
10779         {
10780             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10781             if (vec != NULL) {
10782                 ret = get_errno(safe_readv(arg1, vec, arg3));
10783                 unlock_iovec(vec, arg2, arg3, 1);
10784             } else {
10785                 ret = -host_to_target_errno(errno);
10786             }
10787         }
10788         return ret;
10789     case TARGET_NR_writev:
10790         {
10791             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10792             if (vec != NULL) {
10793                 ret = get_errno(safe_writev(arg1, vec, arg3));
10794                 unlock_iovec(vec, arg2, arg3, 0);
10795             } else {
10796                 ret = -host_to_target_errno(errno);
10797             }
10798         }
10799         return ret;
10800 #if defined(TARGET_NR_preadv)
10801     case TARGET_NR_preadv:
10802         {
10803             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10804             if (vec != NULL) {
10805                 unsigned long low, high;
10806 
10807                 target_to_host_low_high(arg4, arg5, &low, &high);
10808                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10809                 unlock_iovec(vec, arg2, arg3, 1);
10810             } else {
10811                 ret = -host_to_target_errno(errno);
10812             }
10813         }
10814         return ret;
10815 #endif
10816 #if defined(TARGET_NR_pwritev)
10817     case TARGET_NR_pwritev:
10818         {
10819             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10820             if (vec != NULL) {
10821                 unsigned long low, high;
10822 
10823                 target_to_host_low_high(arg4, arg5, &low, &high);
10824                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10825                 unlock_iovec(vec, arg2, arg3, 0);
10826             } else {
10827                 ret = -host_to_target_errno(errno);
10828             }
10829         }
10830         return ret;
10831 #endif
10832     case TARGET_NR_getsid:
10833         return get_errno(getsid(arg1));
10834 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10835     case TARGET_NR_fdatasync:
10836         return get_errno(fdatasync(arg1));
10837 #endif
10838     case TARGET_NR_sched_getaffinity:
10839         {
10840             unsigned int mask_size;
10841             unsigned long *mask;
10842 
10843             /*
10844              * sched_getaffinity needs multiples of ulong, so need to take
10845              * care of mismatches between target ulong and host ulong sizes.
10846              */
10847             if (arg2 & (sizeof(abi_ulong) - 1)) {
10848                 return -TARGET_EINVAL;
10849             }
10850             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10851 
10852             mask = alloca(mask_size);
10853             memset(mask, 0, mask_size);
10854             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10855 
10856             if (!is_error(ret)) {
10857                 if (ret > arg2) {
10858                     /* More data returned than the caller's buffer will fit.
10859                      * This only happens if sizeof(abi_long) < sizeof(long)
10860                      * and the caller passed us a buffer holding an odd number
10861                      * of abi_longs. If the host kernel is actually using the
10862                      * extra 4 bytes then fail EINVAL; otherwise we can just
10863                      * ignore them and only copy the interesting part.
10864                      */
10865                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10866                     if (numcpus > arg2 * 8) {
10867                         return -TARGET_EINVAL;
10868                     }
10869                     ret = arg2;
10870                 }
10871 
10872                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10873                     return -TARGET_EFAULT;
10874                 }
10875             }
10876         }
10877         return ret;
10878     case TARGET_NR_sched_setaffinity:
10879         {
10880             unsigned int mask_size;
10881             unsigned long *mask;
10882 
10883             /*
10884              * sched_setaffinity needs multiples of ulong, so need to take
10885              * care of mismatches between target ulong and host ulong sizes.
10886              */
10887             if (arg2 & (sizeof(abi_ulong) - 1)) {
10888                 return -TARGET_EINVAL;
10889             }
10890             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10891             mask = alloca(mask_size);
10892 
10893             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10894             if (ret) {
10895                 return ret;
10896             }
10897 
10898             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10899         }
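    /*
     * getcpu: the third (tcache) argument is obsolete and ignored by the
     * kernel, so NULL is always passed; cpu and node are copied back to
     * the guest only when the corresponding pointer argument is non-NULL.
     */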
10900     case TARGET_NR_getcpu:
10901         {
10902             unsigned cpu, node;
10903             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10904                                        arg2 ? &node : NULL,
10905                                        NULL));
10906             if (is_error(ret)) {
10907                 return ret;
10908             }
10909             if (arg1 && put_user_u32(cpu, arg1)) {
10910                 return -TARGET_EFAULT;
10911             }
10912             if (arg2 && put_user_u32(node, arg2)) {
10913                 return -TARGET_EFAULT;
10914             }
10915         }
10916         return ret;
10917     case TARGET_NR_sched_setparam:
10918         {
10919             struct target_sched_param *target_schp;
10920             struct sched_param schp;
10921 
10922             if (arg2 == 0) {
10923                 return -TARGET_EINVAL;
10924             }
10925             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10926                 return -TARGET_EFAULT;
10927             }
10928             schp.sched_priority = tswap32(target_schp->sched_priority);
10929             unlock_user_struct(target_schp, arg2, 0);
10930             return get_errno(sys_sched_setparam(arg1, &schp));
10931         }
10932     case TARGET_NR_sched_getparam:
10933         {
10934             struct target_sched_param *target_schp;
10935             struct sched_param schp;
10936 
10937             if (arg2 == 0) {
10938                 return -TARGET_EINVAL;
10939             }
10940             ret = get_errno(sys_sched_getparam(arg1, &schp));
10941             if (!is_error(ret)) {
10942                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10943                     return -TARGET_EFAULT;
10944                 }
10945                 target_schp->sched_priority = tswap32(schp.sched_priority);
10946                 unlock_user_struct(target_schp, arg2, 1);
10947             }
10948         }
10949         return ret;
10950     case TARGET_NR_sched_setscheduler:
10951         {
10952             struct target_sched_param *target_schp;
10953             struct sched_param schp;
10954             if (arg3 == 0) {
10955                 return -TARGET_EINVAL;
10956             }
10957             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10958                 return -TARGET_EFAULT;
10959             }
10960             schp.sched_priority = tswap32(target_schp->sched_priority);
10961             unlock_user_struct(target_schp, arg3, 0);
10962             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10963         }
10964     case TARGET_NR_sched_getscheduler:
10965         return get_errno(sys_sched_getscheduler(arg1));
10966     case TARGET_NR_sched_getattr:
10967         {
10968             struct target_sched_attr *target_scha;
10969             struct sched_attr scha;
10970             if (arg2 == 0) {
10971                 return -TARGET_EINVAL;
10972             }
10973             if (arg3 > sizeof(scha)) {
10974                 arg3 = sizeof(scha);
10975             }
10976             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10977             if (!is_error(ret)) {
10978                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10979                 if (!target_scha) {
10980                     return -TARGET_EFAULT;
10981                 }
10982                 target_scha->size = tswap32(scha.size);
10983                 target_scha->sched_policy = tswap32(scha.sched_policy);
10984                 target_scha->sched_flags = tswap64(scha.sched_flags);
10985                 target_scha->sched_nice = tswap32(scha.sched_nice);
10986                 target_scha->sched_priority = tswap32(scha.sched_priority);
10987                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10988                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10989                 target_scha->sched_period = tswap64(scha.sched_period);
10990                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10991                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10992                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10993                 }
10994                 unlock_user(target_scha, arg2, arg3);
10995             }
10996             return ret;
10997         }
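    /*
     * sched_setattr: a size of 0 means the original layout without the
     * utilization clamp fields; anything smaller than that is rejected
     * with E2BIG after writing the expected size back to the guest.  Any
     * bytes beyond the structure we know about must be zero
     * (check_zeroed_user), matching the kernel's extensible-struct rules.
     */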
10998     case TARGET_NR_sched_setattr:
10999         {
11000             struct target_sched_attr *target_scha;
11001             struct sched_attr scha;
11002             uint32_t size;
11003             int zeroed;
11004             if (arg2 == 0) {
11005                 return -TARGET_EINVAL;
11006             }
11007             if (get_user_u32(size, arg2)) {
11008                 return -TARGET_EFAULT;
11009             }
11010             if (!size) {
11011                 size = offsetof(struct target_sched_attr, sched_util_min);
11012             }
11013             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11014                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11015                     return -TARGET_EFAULT;
11016                 }
11017                 return -TARGET_E2BIG;
11018             }
11019 
11020             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11021             if (zeroed < 0) {
11022                 return zeroed;
11023             } else if (zeroed == 0) {
11024                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11025                     return -TARGET_EFAULT;
11026                 }
11027                 return -TARGET_E2BIG;
11028             }
11029             if (size > sizeof(struct target_sched_attr)) {
11030                 size = sizeof(struct target_sched_attr);
11031             }
11032 
11033             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11034             if (!target_scha) {
11035                 return -TARGET_EFAULT;
11036             }
11037             scha.size = size;
11038             scha.sched_policy = tswap32(target_scha->sched_policy);
11039             scha.sched_flags = tswap64(target_scha->sched_flags);
11040             scha.sched_nice = tswap32(target_scha->sched_nice);
11041             scha.sched_priority = tswap32(target_scha->sched_priority);
11042             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11043             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11044             scha.sched_period = tswap64(target_scha->sched_period);
11045             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11046                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11047                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11048             }
11049             unlock_user(target_scha, arg2, 0);
11050             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11051         }
11052     case TARGET_NR_sched_yield:
11053         return get_errno(sched_yield());
11054     case TARGET_NR_sched_get_priority_max:
11055         return get_errno(sched_get_priority_max(arg1));
11056     case TARGET_NR_sched_get_priority_min:
11057         return get_errno(sched_get_priority_min(arg1));
11058 #ifdef TARGET_NR_sched_rr_get_interval
11059     case TARGET_NR_sched_rr_get_interval:
11060         {
11061             struct timespec ts;
11062             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11063             if (!is_error(ret)) {
11064                 ret = host_to_target_timespec(arg2, &ts);
11065             }
11066         }
11067         return ret;
11068 #endif
11069 #ifdef TARGET_NR_sched_rr_get_interval_time64
11070     case TARGET_NR_sched_rr_get_interval_time64:
11071         {
11072             struct timespec ts;
11073             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11074             if (!is_error(ret)) {
11075                 ret = host_to_target_timespec64(arg2, &ts);
11076             }
11077         }
11078         return ret;
11079 #endif
11080 #if defined(TARGET_NR_nanosleep)
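    /*
     * nanosleep: if the sleep fails (typically EINTR) and the guest
     * supplied a second pointer, the remaining time is copied back so the
     * guest can restart the sleep.
     */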
11081     case TARGET_NR_nanosleep:
11082         {
11083             struct timespec req, rem;
11084             target_to_host_timespec(&req, arg1);
11085             ret = get_errno(safe_nanosleep(&req, &rem));
11086             if (is_error(ret) && arg2) {
11087                 host_to_target_timespec(arg2, &rem);
11088             }
11089         }
11090         return ret;
11091 #endif
11092     case TARGET_NR_prctl:
11093         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11095 #ifdef TARGET_NR_arch_prctl
11096     case TARGET_NR_arch_prctl:
11097         return do_arch_prctl(cpu_env, arg1, arg2);
11098 #endif
11099 #ifdef TARGET_NR_pread64
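    /*
     * pread64/pwrite64: some 32-bit ABIs pass 64-bit values in aligned
     * register pairs, inserting a padding argument first; in that case
     * the offset halves live one slot later (arg5/arg6 instead of
     * arg4/arg5) before target_offset64() recombines them.
     */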
11100     case TARGET_NR_pread64:
11101         if (regpairs_aligned(cpu_env, num)) {
11102             arg4 = arg5;
11103             arg5 = arg6;
11104         }
11105         if (arg2 == 0 && arg3 == 0) {
11106             /* Special-case NULL buffer and zero length, which should succeed */
11107             p = 0;
11108         } else {
11109             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11110             if (!p) {
11111                 return -TARGET_EFAULT;
11112             }
11113         }
11114         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11115         unlock_user(p, arg2, ret);
11116         return ret;
11117     case TARGET_NR_pwrite64:
11118         if (regpairs_aligned(cpu_env, num)) {
11119             arg4 = arg5;
11120             arg5 = arg6;
11121         }
11122         if (arg2 == 0 && arg3 == 0) {
11123             /* Special-case NULL buffer and zero length, which should succeed */
11124             p = 0;
11125         } else {
11126             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11127             if (!p) {
11128                 return -TARGET_EFAULT;
11129             }
11130         }
11131         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11132         unlock_user(p, arg2, 0);
11133         return ret;
11134 #endif
11135     case TARGET_NR_getcwd:
11136         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11137             return -TARGET_EFAULT;
11138         ret = get_errno(sys_getcwd1(p, arg2));
11139         unlock_user(p, arg1, ret);
11140         return ret;
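    /*
     * capget/capset: _LINUX_CAPABILITY_VERSION (v1) takes a single
     * __user_cap_data_struct, while later versions take an array of two,
     * hence data_items.  The header is locked VERIFY_WRITE because the
     * (possibly updated) version is written back to the guest for both
     * syscalls.
     */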
11141     case TARGET_NR_capget:
11142     case TARGET_NR_capset:
11143     {
11144         struct target_user_cap_header *target_header;
11145         struct target_user_cap_data *target_data = NULL;
11146         struct __user_cap_header_struct header;
11147         struct __user_cap_data_struct data[2];
11148         struct __user_cap_data_struct *dataptr = NULL;
11149         int i, target_datalen;
11150         int data_items = 1;
11151 
11152         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11153             return -TARGET_EFAULT;
11154         }
11155         header.version = tswap32(target_header->version);
11156         header.pid = tswap32(target_header->pid);
11157 
11158         if (header.version != _LINUX_CAPABILITY_VERSION) {
11159             /* Version 2 and up takes pointer to two user_data structs */
11160             data_items = 2;
11161         }
11162 
11163         target_datalen = sizeof(*target_data) * data_items;
11164 
11165         if (arg2) {
11166             if (num == TARGET_NR_capget) {
11167                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11168             } else {
11169                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11170             }
11171             if (!target_data) {
11172                 unlock_user_struct(target_header, arg1, 0);
11173                 return -TARGET_EFAULT;
11174             }
11175 
11176             if (num == TARGET_NR_capset) {
11177                 for (i = 0; i < data_items; i++) {
11178                     data[i].effective = tswap32(target_data[i].effective);
11179                     data[i].permitted = tswap32(target_data[i].permitted);
11180                     data[i].inheritable = tswap32(target_data[i].inheritable);
11181                 }
11182             }
11183 
11184             dataptr = data;
11185         }
11186 
11187         if (num == TARGET_NR_capget) {
11188             ret = get_errno(capget(&header, dataptr));
11189         } else {
11190             ret = get_errno(capset(&header, dataptr));
11191         }
11192 
11193         /* The kernel always updates version for both capget and capset */
11194         target_header->version = tswap32(header.version);
11195         unlock_user_struct(target_header, arg1, 1);
11196 
11197         if (arg2) {
11198             if (num == TARGET_NR_capget) {
11199                 for (i = 0; i < data_items; i++) {
11200                     target_data[i].effective = tswap32(data[i].effective);
11201                     target_data[i].permitted = tswap32(data[i].permitted);
11202                     target_data[i].inheritable = tswap32(data[i].inheritable);
11203                 }
11204                 unlock_user(target_data, arg2, target_datalen);
11205             } else {
11206                 unlock_user(target_data, arg2, 0);
11207             }
11208         }
11209         return ret;
11210     }
11211     case TARGET_NR_sigaltstack:
11212         return do_sigaltstack(arg1, arg2, cpu_env);
11213 
11214 #ifdef CONFIG_SENDFILE
11215 #ifdef TARGET_NR_sendfile
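    /*
     * sendfile/sendfile64: the optional offset is read from guest memory,
     * passed to the host by pointer, and the updated value is written
     * back on success.  The two variants differ only in the width of the
     * guest offset (abi_long vs. 64 bit).
     */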
11216     case TARGET_NR_sendfile:
11217     {
11218         off_t *offp = NULL;
11219         off_t off;
11220         if (arg3) {
11221             ret = get_user_sal(off, arg3);
11222             if (is_error(ret)) {
11223                 return ret;
11224             }
11225             offp = &off;
11226         }
11227         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11228         if (!is_error(ret) && arg3) {
11229             abi_long ret2 = put_user_sal(off, arg3);
11230             if (is_error(ret2)) {
11231                 ret = ret2;
11232             }
11233         }
11234         return ret;
11235     }
11236 #endif
11237 #ifdef TARGET_NR_sendfile64
11238     case TARGET_NR_sendfile64:
11239     {
11240         off_t *offp = NULL;
11241         off_t off;
11242         if (arg3) {
11243             ret = get_user_s64(off, arg3);
11244             if (is_error(ret)) {
11245                 return ret;
11246             }
11247             offp = &off;
11248         }
11249         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11250         if (!is_error(ret) && arg3) {
11251             abi_long ret2 = put_user_s64(off, arg3);
11252             if (is_error(ret2)) {
11253                 ret = ret2;
11254             }
11255         }
11256         return ret;
11257     }
11258 #endif
11259 #endif
11260 #ifdef TARGET_NR_vfork
11261     case TARGET_NR_vfork:
11262         return get_errno(do_fork(cpu_env,
11263                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11264                          0, 0, 0, 0));
11265 #endif
11266 #ifdef TARGET_NR_ugetrlimit
11267     case TARGET_NR_ugetrlimit:
11268     {
11269         struct rlimit rlim;
11270         int resource = target_to_host_resource(arg1);
11271         ret = get_errno(getrlimit(resource, &rlim));
11272         if (!is_error(ret)) {
11273             struct target_rlimit *target_rlim;
11274             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11275                 return -TARGET_EFAULT;
11276             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11277             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11278             unlock_user_struct(target_rlim, arg2, 1);
11279         }
11280         return ret;
11281     }
11282 #endif
11283 #ifdef TARGET_NR_truncate64
11284     case TARGET_NR_truncate64:
11285         if (!(p = lock_user_string(arg1)))
11286             return -TARGET_EFAULT;
11287         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11288         unlock_user(p, arg1, 0);
11289         return ret;
11290 #endif
11291 #ifdef TARGET_NR_ftruncate64
11292     case TARGET_NR_ftruncate64:
11293         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11294 #endif
11295 #ifdef TARGET_NR_stat64
11296     case TARGET_NR_stat64:
11297         if (!(p = lock_user_string(arg1))) {
11298             return -TARGET_EFAULT;
11299         }
11300         ret = get_errno(stat(path(p), &st));
11301         unlock_user(p, arg1, 0);
11302         if (!is_error(ret))
11303             ret = host_to_target_stat64(cpu_env, arg2, &st);
11304         return ret;
11305 #endif
11306 #ifdef TARGET_NR_lstat64
11307     case TARGET_NR_lstat64:
11308         if (!(p = lock_user_string(arg1))) {
11309             return -TARGET_EFAULT;
11310         }
11311         ret = get_errno(lstat(path(p), &st));
11312         unlock_user(p, arg1, 0);
11313         if (!is_error(ret))
11314             ret = host_to_target_stat64(cpu_env, arg2, &st);
11315         return ret;
11316 #endif
11317 #ifdef TARGET_NR_fstat64
11318     case TARGET_NR_fstat64:
11319         ret = get_errno(fstat(arg1, &st));
11320         if (!is_error(ret))
11321             ret = host_to_target_stat64(cpu_env, arg2, &st);
11322         return ret;
11323 #endif
11324 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11325 #ifdef TARGET_NR_fstatat64
11326     case TARGET_NR_fstatat64:
11327 #endif
11328 #ifdef TARGET_NR_newfstatat
11329     case TARGET_NR_newfstatat:
11330 #endif
11331         if (!(p = lock_user_string(arg2))) {
11332             return -TARGET_EFAULT;
11333         }
11334         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11335         unlock_user(p, arg2, 0);
11336         if (!is_error(ret))
11337             ret = host_to_target_stat64(cpu_env, arg3, &st);
11338         return ret;
11339 #endif
11340 #if defined(TARGET_NR_statx)
11341     case TARGET_NR_statx:
11342         {
11343             struct target_statx *target_stx;
11344             int dirfd = arg1;
11345             int flags = arg3;
11346 
11347             p = lock_user_string(arg2);
11348             if (p == NULL) {
11349                 return -TARGET_EFAULT;
11350             }
11351 #if defined(__NR_statx)
11352             {
11353                 /*
11354                  * It is assumed that struct statx is architecture independent.
11355                  */
11356                 struct target_statx host_stx;
11357                 int mask = arg4;
11358 
11359                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11360                 if (!is_error(ret)) {
11361                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11362                         unlock_user(p, arg2, 0);
11363                         return -TARGET_EFAULT;
11364                     }
11365                 }
11366 
11367                 if (ret != -TARGET_ENOSYS) {
11368                     unlock_user(p, arg2, 0);
11369                     return ret;
11370                 }
11371             }
11372 #endif
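            /*
             * Fallback when the host lacks the statx syscall (or it
             * returned ENOSYS): emulate it with fstatat() and fill in
             * only the fields a struct stat can provide.
             */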
11373             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11374             unlock_user(p, arg2, 0);
11375 
11376             if (!is_error(ret)) {
11377                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11378                     return -TARGET_EFAULT;
11379                 }
11380                 memset(target_stx, 0, sizeof(*target_stx));
11381                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11382                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11383                 __put_user(st.st_ino, &target_stx->stx_ino);
11384                 __put_user(st.st_mode, &target_stx->stx_mode);
11385                 __put_user(st.st_uid, &target_stx->stx_uid);
11386                 __put_user(st.st_gid, &target_stx->stx_gid);
11387                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11388                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11389                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11390                 __put_user(st.st_size, &target_stx->stx_size);
11391                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11392                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11393                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11394                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11395                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11396                 unlock_user_struct(target_stx, arg5, 1);
11397             }
11398         }
11399         return ret;
11400 #endif
11401 #ifdef TARGET_NR_lchown
11402     case TARGET_NR_lchown:
11403         if (!(p = lock_user_string(arg1)))
11404             return -TARGET_EFAULT;
11405         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11406         unlock_user(p, arg1, 0);
11407         return ret;
11408 #endif
11409 #ifdef TARGET_NR_getuid
11410     case TARGET_NR_getuid:
11411         return get_errno(high2lowuid(getuid()));
11412 #endif
11413 #ifdef TARGET_NR_getgid
11414     case TARGET_NR_getgid:
11415         return get_errno(high2lowgid(getgid()));
11416 #endif
11417 #ifdef TARGET_NR_geteuid
11418     case TARGET_NR_geteuid:
11419         return get_errno(high2lowuid(geteuid()));
11420 #endif
11421 #ifdef TARGET_NR_getegid
11422     case TARGET_NR_getegid:
11423         return get_errno(high2lowgid(getegid()));
11424 #endif
11425     case TARGET_NR_setreuid:
11426         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11427     case TARGET_NR_setregid:
11428         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11429     case TARGET_NR_getgroups:
11430         {
11431             int gidsetsize = arg1;
11432             target_id *target_grouplist;
11433             gid_t *grouplist;
11434             int i;
11435 
11436             grouplist = alloca(gidsetsize * sizeof(gid_t));
11437             ret = get_errno(getgroups(gidsetsize, grouplist));
11438             if (gidsetsize == 0)
11439                 return ret;
11440             if (!is_error(ret)) {
11441                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11442                 if (!target_grouplist)
11443                     return -TARGET_EFAULT;
11444                 for (i = 0; i < ret; i++)
11445                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11446                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11447             }
11448         }
11449         return ret;
11450     case TARGET_NR_setgroups:
11451         {
11452             int gidsetsize = arg1;
11453             target_id *target_grouplist;
11454             gid_t *grouplist = NULL;
11455             int i;
11456             if (gidsetsize) {
11457                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11458                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11459                 if (!target_grouplist) {
11460                     return -TARGET_EFAULT;
11461                 }
11462                 for (i = 0; i < gidsetsize; i++) {
11463                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11464                 }
11465                 unlock_user(target_grouplist, arg2, 0);
11466             }
11467             return get_errno(setgroups(gidsetsize, grouplist));
11468         }
11469     case TARGET_NR_fchown:
11470         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11471 #if defined(TARGET_NR_fchownat)
11472     case TARGET_NR_fchownat:
11473         if (!(p = lock_user_string(arg2)))
11474             return -TARGET_EFAULT;
11475         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11476                                  low2highgid(arg4), arg5));
11477         unlock_user(p, arg2, 0);
11478         return ret;
11479 #endif
11480 #ifdef TARGET_NR_setresuid
11481     case TARGET_NR_setresuid:
11482         return get_errno(sys_setresuid(low2highuid(arg1),
11483                                        low2highuid(arg2),
11484                                        low2highuid(arg3)));
11485 #endif
11486 #ifdef TARGET_NR_getresuid
11487     case TARGET_NR_getresuid:
11488         {
11489             uid_t ruid, euid, suid;
11490             ret = get_errno(getresuid(&ruid, &euid, &suid));
11491             if (!is_error(ret)) {
11492                 if (put_user_id(high2lowuid(ruid), arg1)
11493                     || put_user_id(high2lowuid(euid), arg2)
11494                     || put_user_id(high2lowuid(suid), arg3))
11495                     return -TARGET_EFAULT;
11496             }
11497         }
11498         return ret;
11499 #endif
11500 #ifdef TARGET_NR_setresgid
11501     case TARGET_NR_setresgid:
11502         return get_errno(sys_setresgid(low2highgid(arg1),
11503                                        low2highgid(arg2),
11504                                        low2highgid(arg3)));
11505 #endif
11506 #ifdef TARGET_NR_getresgid
11507     case TARGET_NR_getresgid:
11508         {
11509             gid_t rgid, egid, sgid;
11510             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11511             if (!is_error(ret)) {
11512                 if (put_user_id(high2lowgid(rgid), arg1)
11513                     || put_user_id(high2lowgid(egid), arg2)
11514                     || put_user_id(high2lowgid(sgid), arg3))
11515                     return -TARGET_EFAULT;
11516             }
11517         }
11518         return ret;
11519 #endif
11520 #ifdef TARGET_NR_chown
11521     case TARGET_NR_chown:
11522         if (!(p = lock_user_string(arg1)))
11523             return -TARGET_EFAULT;
11524         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11525         unlock_user(p, arg1, 0);
11526         return ret;
11527 #endif
11528     case TARGET_NR_setuid:
11529         return get_errno(sys_setuid(low2highuid(arg1)));
11530     case TARGET_NR_setgid:
11531         return get_errno(sys_setgid(low2highgid(arg1)));
11532     case TARGET_NR_setfsuid:
11533         return get_errno(setfsuid(arg1));
11534     case TARGET_NR_setfsgid:
11535         return get_errno(setfsgid(arg1));
11536 
11537 #ifdef TARGET_NR_lchown32
11538     case TARGET_NR_lchown32:
11539         if (!(p = lock_user_string(arg1)))
11540             return -TARGET_EFAULT;
11541         ret = get_errno(lchown(p, arg2, arg3));
11542         unlock_user(p, arg1, 0);
11543         return ret;
11544 #endif
11545 #ifdef TARGET_NR_getuid32
11546     case TARGET_NR_getuid32:
11547         return get_errno(getuid());
11548 #endif
11549 
11550 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11551     /* Alpha specific */
11552     case TARGET_NR_getxuid:
11553         {
11554             uid_t euid;
11555             euid = geteuid();
11556             cpu_env->ir[IR_A4] = euid;
11557         }
11558         return get_errno(getuid());
11559 #endif
11560 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11561     /* Alpha specific */
11562     case TARGET_NR_getxgid:
11563         {
11564             gid_t egid;
11565             egid = getegid();
11566             cpu_env->ir[IR_A4] = egid;
11567         }
11568         return get_errno(getgid());
11569 #endif
11570 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11571     /* Alpha specific */
11572     case TARGET_NR_osf_getsysinfo:
11573         ret = -TARGET_EOPNOTSUPP;
11574         switch (arg1) {
11575           case TARGET_GSI_IEEE_FP_CONTROL:
11576             {
11577                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11578                 uint64_t swcr = cpu_env->swcr;
11579 
11580                 swcr &= ~SWCR_STATUS_MASK;
11581                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11582 
11583                 if (put_user_u64(swcr, arg2))
11584                     return -TARGET_EFAULT;
11585                 ret = 0;
11586             }
11587             break;
11588 
11589           /* case GSI_IEEE_STATE_AT_SIGNAL:
11590              -- Not implemented in linux kernel.
11591              case GSI_UACPROC:
11592              -- Retrieves current unaligned access state; not much used.
11593              case GSI_PROC_TYPE:
11594              -- Retrieves implver information; surely not used.
11595              case GSI_GET_HWRPB:
11596              -- Grabs a copy of the HWRPB; surely not used.
11597           */
11598         }
11599         return ret;
11600 #endif
11601 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11602     /* Alpha specific */
11603     case TARGET_NR_osf_setsysinfo:
11604         ret = -TARGET_EOPNOTSUPP;
11605         switch (arg1) {
11606           case TARGET_SSI_IEEE_FP_CONTROL:
11607             {
11608                 uint64_t swcr, fpcr;
11609 
11610                 if (get_user_u64(swcr, arg2)) {
11611                     return -TARGET_EFAULT;
11612                 }
11613 
11614                 /*
11615                  * The kernel calls swcr_update_status to update the
11616                  * status bits from the fpcr at every point that it
11617                  * could be queried.  Therefore, we store the status
11618                  * bits only in FPCR.
11619                  */
11620                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11621 
11622                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11623                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11624                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11625                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11626                 ret = 0;
11627             }
11628             break;
11629 
11630           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11631             {
11632                 uint64_t exc, fpcr, fex;
11633 
11634                 if (get_user_u64(exc, arg2)) {
11635                     return -TARGET_EFAULT;
11636                 }
11637                 exc &= SWCR_STATUS_MASK;
11638                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11639 
11640                 /* Old exceptions are not signaled.  */
11641                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11642                 fex = exc & ~fex;
11643                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11644                 fex &= (cpu_env)->swcr;
11645 
11646                 /* Update the hardware fpcr.  */
11647                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11648                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11649 
11650                 if (fex) {
11651                     int si_code = TARGET_FPE_FLTUNK;
11652                     target_siginfo_t info;
11653 
11654                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11655                         si_code = TARGET_FPE_FLTUND;
11656                     }
11657                     if (fex & SWCR_TRAP_ENABLE_INE) {
11658                         si_code = TARGET_FPE_FLTRES;
11659                     }
11660                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11661                         si_code = TARGET_FPE_FLTUND;
11662                     }
11663                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11664                         si_code = TARGET_FPE_FLTOVF;
11665                     }
11666                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11667                         si_code = TARGET_FPE_FLTDIV;
11668                     }
11669                     if (fex & SWCR_TRAP_ENABLE_INV) {
11670                         si_code = TARGET_FPE_FLTINV;
11671                     }
11672 
11673                     info.si_signo = SIGFPE;
11674                     info.si_errno = 0;
11675                     info.si_code = si_code;
11676                     info._sifields._sigfault._addr = (cpu_env)->pc;
11677                     queue_signal(cpu_env, info.si_signo,
11678                                  QEMU_SI_FAULT, &info);
11679                 }
11680                 ret = 0;
11681             }
11682             break;
11683 
11684           /* case SSI_NVPAIRS:
11685              -- Used with SSIN_UACPROC to enable unaligned accesses.
11686              case SSI_IEEE_STATE_AT_SIGNAL:
11687              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11688              -- Not implemented in linux kernel
11689           */
11690         }
11691         return ret;
11692 #endif
11693 #ifdef TARGET_NR_osf_sigprocmask
11694     /* Alpha specific.  */
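    /*
     * Unlike sigprocmask(2), this variant returns the previous mask as
     * the syscall's return value instead of storing it through a user
     * pointer.
     */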
11695     case TARGET_NR_osf_sigprocmask:
11696         {
11697             abi_ulong mask;
11698             int how;
11699             sigset_t set, oldset;
11700 
11701             switch (arg1) {
11702             case TARGET_SIG_BLOCK:
11703                 how = SIG_BLOCK;
11704                 break;
11705             case TARGET_SIG_UNBLOCK:
11706                 how = SIG_UNBLOCK;
11707                 break;
11708             case TARGET_SIG_SETMASK:
11709                 how = SIG_SETMASK;
11710                 break;
11711             default:
11712                 return -TARGET_EINVAL;
11713             }
11714             mask = arg2;
11715             target_to_host_old_sigset(&set, &mask);
11716             ret = do_sigprocmask(how, &set, &oldset);
11717             if (!ret) {
11718                 host_to_target_old_sigset(&mask, &oldset);
11719                 ret = mask;
11720             }
11721         }
11722         return ret;
11723 #endif
11724 
11725 #ifdef TARGET_NR_getgid32
11726     case TARGET_NR_getgid32:
11727         return get_errno(getgid());
11728 #endif
11729 #ifdef TARGET_NR_geteuid32
11730     case TARGET_NR_geteuid32:
11731         return get_errno(geteuid());
11732 #endif
11733 #ifdef TARGET_NR_getegid32
11734     case TARGET_NR_getegid32:
11735         return get_errno(getegid());
11736 #endif
11737 #ifdef TARGET_NR_setreuid32
11738     case TARGET_NR_setreuid32:
11739         return get_errno(setreuid(arg1, arg2));
11740 #endif
11741 #ifdef TARGET_NR_setregid32
11742     case TARGET_NR_setregid32:
11743         return get_errno(setregid(arg1, arg2));
11744 #endif
11745 #ifdef TARGET_NR_getgroups32
11746     case TARGET_NR_getgroups32:
11747         {
11748             int gidsetsize = arg1;
11749             uint32_t *target_grouplist;
11750             gid_t *grouplist;
11751             int i;
11752 
11753             grouplist = alloca(gidsetsize * sizeof(gid_t));
11754             ret = get_errno(getgroups(gidsetsize, grouplist));
11755             if (gidsetsize == 0)
11756                 return ret;
11757             if (!is_error(ret)) {
11758                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11759                 if (!target_grouplist) {
11760                     return -TARGET_EFAULT;
11761                 }
11762                 for (i = 0; i < ret; i++)
11763                     target_grouplist[i] = tswap32(grouplist[i]);
11764                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11765             }
11766         }
11767         return ret;
11768 #endif
11769 #ifdef TARGET_NR_setgroups32
11770     case TARGET_NR_setgroups32:
11771         {
11772             int gidsetsize = arg1;
11773             uint32_t *target_grouplist;
11774             gid_t *grouplist;
11775             int i;
11776 
11777             grouplist = alloca(gidsetsize * sizeof(gid_t));
11778             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11779             if (!target_grouplist) {
11780                 return -TARGET_EFAULT;
11781             }
11782             for (i = 0; i < gidsetsize; i++)
11783                 grouplist[i] = tswap32(target_grouplist[i]);
11784             unlock_user(target_grouplist, arg2, 0);
11785             return get_errno(setgroups(gidsetsize, grouplist));
11786         }
11787 #endif
11788 #ifdef TARGET_NR_fchown32
11789     case TARGET_NR_fchown32:
11790         return get_errno(fchown(arg1, arg2, arg3));
11791 #endif
11792 #ifdef TARGET_NR_setresuid32
11793     case TARGET_NR_setresuid32:
11794         return get_errno(sys_setresuid(arg1, arg2, arg3));
11795 #endif
11796 #ifdef TARGET_NR_getresuid32
11797     case TARGET_NR_getresuid32:
11798         {
11799             uid_t ruid, euid, suid;
11800             ret = get_errno(getresuid(&ruid, &euid, &suid));
11801             if (!is_error(ret)) {
11802                 if (put_user_u32(ruid, arg1)
11803                     || put_user_u32(euid, arg2)
11804                     || put_user_u32(suid, arg3))
11805                     return -TARGET_EFAULT;
11806             }
11807         }
11808         return ret;
11809 #endif
11810 #ifdef TARGET_NR_setresgid32
11811     case TARGET_NR_setresgid32:
11812         return get_errno(sys_setresgid(arg1, arg2, arg3));
11813 #endif
11814 #ifdef TARGET_NR_getresgid32
11815     case TARGET_NR_getresgid32:
11816         {
11817             gid_t rgid, egid, sgid;
11818             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11819             if (!is_error(ret)) {
11820                 if (put_user_u32(rgid, arg1)
11821                     || put_user_u32(egid, arg2)
11822                     || put_user_u32(sgid, arg3))
11823                     return -TARGET_EFAULT;
11824             }
11825         }
11826         return ret;
11827 #endif
11828 #ifdef TARGET_NR_chown32
11829     case TARGET_NR_chown32:
11830         if (!(p = lock_user_string(arg1)))
11831             return -TARGET_EFAULT;
11832         ret = get_errno(chown(p, arg2, arg3));
11833         unlock_user(p, arg1, 0);
11834         return ret;
11835 #endif
11836 #ifdef TARGET_NR_setuid32
11837     case TARGET_NR_setuid32:
11838         return get_errno(sys_setuid(arg1));
11839 #endif
11840 #ifdef TARGET_NR_setgid32
11841     case TARGET_NR_setgid32:
11842         return get_errno(sys_setgid(arg1));
11843 #endif
11844 #ifdef TARGET_NR_setfsuid32
11845     case TARGET_NR_setfsuid32:
11846         return get_errno(setfsuid(arg1));
11847 #endif
11848 #ifdef TARGET_NR_setfsgid32
11849     case TARGET_NR_setfsgid32:
11850         return get_errno(setfsgid(arg1));
11851 #endif
11852 #ifdef TARGET_NR_mincore
11853     case TARGET_NR_mincore:
11854         {
11855             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11856             if (!a) {
11857                 return -TARGET_ENOMEM;
11858             }
11859             p = lock_user_string(arg3);
11860             if (!p) {
11861                 ret = -TARGET_EFAULT;
11862             } else {
11863                 ret = get_errno(mincore(a, arg2, p));
11864                 unlock_user(p, arg3, ret);
11865             }
11866             unlock_user(a, arg1, 0);
11867         }
11868         return ret;
11869 #endif
11870 #ifdef TARGET_NR_arm_fadvise64_64
11871     case TARGET_NR_arm_fadvise64_64:
11872         /* arm_fadvise64_64 looks like fadvise64_64 but
11873          * with different argument order: fd, advice, offset, len
11874          * rather than the usual fd, offset, len, advice.
11875          * Note that offset and len are both 64-bit so appear as
11876          * pairs of 32-bit registers.
11877          */
11878         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11879                             target_offset64(arg5, arg6), arg2);
11880         return -host_to_target_errno(ret);
11881 #endif
11882 
11883 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11884 
11885 #ifdef TARGET_NR_fadvise64_64
11886     case TARGET_NR_fadvise64_64:
11887 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11888         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11889         ret = arg2;
11890         arg2 = arg3;
11891         arg3 = arg4;
11892         arg4 = arg5;
11893         arg5 = arg6;
11894         arg6 = ret;
11895 #else
11896         /* 6 args: fd, offset (high, low), len (high, low), advice */
11897         if (regpairs_aligned(cpu_env, num)) {
11898             /* offset is in (3,4), len in (5,6) and advice in 7 */
11899             arg2 = arg3;
11900             arg3 = arg4;
11901             arg4 = arg5;
11902             arg5 = arg6;
11903             arg6 = arg7;
11904         }
11905 #endif
11906         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11907                             target_offset64(arg4, arg5), arg6);
11908         return -host_to_target_errno(ret);
11909 #endif
11910 
11911 #ifdef TARGET_NR_fadvise64
11912     case TARGET_NR_fadvise64:
11913         /* 5 args: fd, offset (high, low), len, advice */
11914         if (regpairs_aligned(cpu_env, num)) {
11915             /* offset is in (3,4), len in 5 and advice in 6 */
11916             arg2 = arg3;
11917             arg3 = arg4;
11918             arg4 = arg5;
11919             arg5 = arg6;
11920         }
11921         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11922         return -host_to_target_errno(ret);
11923 #endif
11924 
11925 #else /* not a 32-bit ABI */
11926 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11927 #ifdef TARGET_NR_fadvise64_64
11928     case TARGET_NR_fadvise64_64:
11929 #endif
11930 #ifdef TARGET_NR_fadvise64
11931     case TARGET_NR_fadvise64:
11932 #endif
11933 #ifdef TARGET_S390X
11934         switch (arg4) {
11935         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11936         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11937         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11938         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11939         default: break;
11940         }
11941 #endif
11942         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11943 #endif
11944 #endif /* end of 64-bit ABI fadvise handling */
11945 
11946 #ifdef TARGET_NR_madvise
11947     case TARGET_NR_madvise:
11948         return target_madvise(arg1, arg2, arg3);
11949 #endif
11950 #ifdef TARGET_NR_fcntl64
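    /*
     * fcntl64: only F_GETLK64/F_SETLK64/F_SETLKW64 need a struct flock64
     * conversion here; everything else is forwarded to do_fcntl().  On
     * Arm, the old OABI lays out flock64 differently, so alternative copy
     * helpers are used for non-EABI guests.
     */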
11951     case TARGET_NR_fcntl64:
11952     {
11953         int cmd;
11954         struct flock64 fl;
11955         from_flock64_fn *copyfrom = copy_from_user_flock64;
11956         to_flock64_fn *copyto = copy_to_user_flock64;
11957 
11958 #ifdef TARGET_ARM
11959         if (!cpu_env->eabi) {
11960             copyfrom = copy_from_user_oabi_flock64;
11961             copyto = copy_to_user_oabi_flock64;
11962         }
11963 #endif
11964 
11965         cmd = target_to_host_fcntl_cmd(arg2);
11966         if (cmd == -TARGET_EINVAL) {
11967             return cmd;
11968         }
11969 
11970         switch (arg2) {
11971         case TARGET_F_GETLK64:
11972             ret = copyfrom(&fl, arg3);
11973             if (ret) {
11974                 break;
11975             }
11976             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11977             if (ret == 0) {
11978                 ret = copyto(arg3, &fl);
11979             }
11980             break;
11981 
11982         case TARGET_F_SETLK64:
11983         case TARGET_F_SETLKW64:
11984             ret = copyfrom(&fl, arg3);
11985             if (ret) {
11986                 break;
11987             }
11988             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11989             break;
11990         default:
11991             ret = do_fcntl(arg1, arg2, arg3);
11992             break;
11993         }
11994         return ret;
11995     }
11996 #endif
11997 #ifdef TARGET_NR_cacheflush
11998     case TARGET_NR_cacheflush:
11999         /* self-modifying code is handled automatically, so nothing needed */
12000         return 0;
12001 #endif
12002 #ifdef TARGET_NR_getpagesize
12003     case TARGET_NR_getpagesize:
12004         return TARGET_PAGE_SIZE;
12005 #endif
12006     case TARGET_NR_gettid:
12007         return get_errno(sys_gettid());
12008 #ifdef TARGET_NR_readahead
12009     case TARGET_NR_readahead:
12010 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12011         if (regpairs_aligned(cpu_env, num)) {
12012             arg2 = arg3;
12013             arg3 = arg4;
12014             arg4 = arg5;
12015         }
12016         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
12017 #else
12018         ret = get_errno(readahead(arg1, arg2, arg3));
12019 #endif
12020         return ret;
12021 #endif
12022 #ifdef CONFIG_ATTR
12023 #ifdef TARGET_NR_setxattr
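    /*
     * xattr family: a NULL guest buffer is passed through as NULL so the
     * usual "call with size 0 to query the required buffer size" idiom
     * keeps working; names and values are bounce-buffered via
     * lock_user()/unlock_user().
     */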
12024     case TARGET_NR_listxattr:
12025     case TARGET_NR_llistxattr:
12026     {
12027         void *p, *b = 0;
12028         if (arg2) {
12029             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12030             if (!b) {
12031                 return -TARGET_EFAULT;
12032             }
12033         }
12034         p = lock_user_string(arg1);
12035         if (p) {
12036             if (num == TARGET_NR_listxattr) {
12037                 ret = get_errno(listxattr(p, b, arg3));
12038             } else {
12039                 ret = get_errno(llistxattr(p, b, arg3));
12040             }
12041         } else {
12042             ret = -TARGET_EFAULT;
12043         }
12044         unlock_user(p, arg1, 0);
12045         unlock_user(b, arg2, arg3);
12046         return ret;
12047     }
12048     case TARGET_NR_flistxattr:
12049     {
12050         void *b = 0;
12051         if (arg2) {
12052             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12053             if (!b) {
12054                 return -TARGET_EFAULT;
12055             }
12056         }
12057         ret = get_errno(flistxattr(arg1, b, arg3));
12058         unlock_user(b, arg2, arg3);
12059         return ret;
12060     }
12061     case TARGET_NR_setxattr:
12062     case TARGET_NR_lsetxattr:
12063         {
12064             void *p, *n, *v = 0;
12065             if (arg3) {
12066                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12067                 if (!v) {
12068                     return -TARGET_EFAULT;
12069                 }
12070             }
12071             p = lock_user_string(arg1);
12072             n = lock_user_string(arg2);
12073             if (p && n) {
12074                 if (num == TARGET_NR_setxattr) {
12075                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12076                 } else {
12077                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12078                 }
12079             } else {
12080                 ret = -TARGET_EFAULT;
12081             }
12082             unlock_user(p, arg1, 0);
12083             unlock_user(n, arg2, 0);
12084             unlock_user(v, arg3, 0);
12085         }
12086         return ret;
12087     case TARGET_NR_fsetxattr:
12088         {
12089             void *n, *v = 0;
12090             if (arg3) {
12091                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12092                 if (!v) {
12093                     return -TARGET_EFAULT;
12094                 }
12095             }
12096             n = lock_user_string(arg2);
12097             if (n) {
12098                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12099             } else {
12100                 ret = -TARGET_EFAULT;
12101             }
12102             unlock_user(n, arg2, 0);
12103             unlock_user(v, arg3, 0);
12104         }
12105         return ret;
12106     case TARGET_NR_getxattr:
12107     case TARGET_NR_lgetxattr:
12108         {
12109             void *p, *n, *v = 0;
12110             if (arg3) {
12111                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12112                 if (!v) {
12113                     return -TARGET_EFAULT;
12114                 }
12115             }
12116             p = lock_user_string(arg1);
12117             n = lock_user_string(arg2);
12118             if (p && n) {
12119                 if (num == TARGET_NR_getxattr) {
12120                     ret = get_errno(getxattr(p, n, v, arg4));
12121                 } else {
12122                     ret = get_errno(lgetxattr(p, n, v, arg4));
12123                 }
12124             } else {
12125                 ret = -TARGET_EFAULT;
12126             }
12127             unlock_user(p, arg1, 0);
12128             unlock_user(n, arg2, 0);
12129             unlock_user(v, arg3, arg4);
12130         }
12131         return ret;
12132     case TARGET_NR_fgetxattr:
12133         {
12134             void *n, *v = 0;
12135             if (arg3) {
12136                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12137                 if (!v) {
12138                     return -TARGET_EFAULT;
12139                 }
12140             }
12141             n = lock_user_string(arg2);
12142             if (n) {
12143                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12144             } else {
12145                 ret = -TARGET_EFAULT;
12146             }
12147             unlock_user(n, arg2, 0);
12148             unlock_user(v, arg3, arg4);
12149         }
12150         return ret;
12151     case TARGET_NR_removexattr:
12152     case TARGET_NR_lremovexattr:
12153         {
12154             void *p, *n;
12155             p = lock_user_string(arg1);
12156             n = lock_user_string(arg2);
12157             if (p && n) {
12158                 if (num == TARGET_NR_removexattr) {
12159                     ret = get_errno(removexattr(p, n));
12160                 } else {
12161                     ret = get_errno(lremovexattr(p, n));
12162                 }
12163             } else {
12164                 ret = -TARGET_EFAULT;
12165             }
12166             unlock_user(p, arg1, 0);
12167             unlock_user(n, arg2, 0);
12168         }
12169         return ret;
12170     case TARGET_NR_fremovexattr:
12171         {
12172             void *n;
12173             n = lock_user_string(arg2);
12174             if (n) {
12175                 ret = get_errno(fremovexattr(arg1, n));
12176             } else {
12177                 ret = -TARGET_EFAULT;
12178             }
12179             unlock_user(n, arg2, 0);
12180         }
12181         return ret;
12182 #endif
12183 #endif /* CONFIG_ATTR */
12184 #ifdef TARGET_NR_set_thread_area
12185     case TARGET_NR_set_thread_area:
12186 #if defined(TARGET_MIPS)
12187       cpu_env->active_tc.CP0_UserLocal = arg1;
12188       return 0;
12189 #elif defined(TARGET_CRIS)
12190       if (arg1 & 0xff) {
12191           ret = -TARGET_EINVAL;
12192       } else {
12193           cpu_env->pregs[PR_PID] = arg1;
12194           ret = 0;
12195       }
12196       return ret;
12197 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12198       return do_set_thread_area(cpu_env, arg1);
12199 #elif defined(TARGET_M68K)
12200       {
12201           TaskState *ts = cpu->opaque;
12202           ts->tp_value = arg1;
12203           return 0;
12204       }
12205 #else
12206       return -TARGET_ENOSYS;
12207 #endif
12208 #endif
12209 #ifdef TARGET_NR_get_thread_area
12210     case TARGET_NR_get_thread_area:
12211 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12212         return do_get_thread_area(cpu_env, arg1);
12213 #elif defined(TARGET_M68K)
12214         {
12215             TaskState *ts = cpu->opaque;
12216             return ts->tp_value;
12217         }
12218 #else
12219         return -TARGET_ENOSYS;
12220 #endif
12221 #endif
12222 #ifdef TARGET_NR_getdomainname
12223     case TARGET_NR_getdomainname:
12224         return -TARGET_ENOSYS;
12225 #endif
12226 
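/*
 * POSIX clock syscalls: each case converts the guest timespec layout to
 * the host's, forwards the call, and converts the result back.  The
 * *_time64 variants differ only in using the 64-bit time_t guest layout
 * (struct target__kernel_timespec) for those conversions.  From the
 * guest's side this backs the ordinary libc interface, e.g.
 * (illustrative only, not part of this file):
 *     struct timespec ts;
 *     clock_gettime(CLOCK_MONOTONIC, &ts);
 */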
12227 #ifdef TARGET_NR_clock_settime
12228     case TARGET_NR_clock_settime:
12229     {
12230         struct timespec ts;
12231 
12232         ret = target_to_host_timespec(&ts, arg2);
12233         if (!is_error(ret)) {
12234             ret = get_errno(clock_settime(arg1, &ts));
12235         }
12236         return ret;
12237     }
12238 #endif
12239 #ifdef TARGET_NR_clock_settime64
12240     case TARGET_NR_clock_settime64:
12241     {
12242         struct timespec ts;
12243 
12244         ret = target_to_host_timespec64(&ts, arg2);
12245         if (!is_error(ret)) {
12246             ret = get_errno(clock_settime(arg1, &ts));
12247         }
12248         return ret;
12249     }
12250 #endif
12251 #ifdef TARGET_NR_clock_gettime
12252     case TARGET_NR_clock_gettime:
12253     {
12254         struct timespec ts;
12255         ret = get_errno(clock_gettime(arg1, &ts));
12256         if (!is_error(ret)) {
12257             ret = host_to_target_timespec(arg2, &ts);
12258         }
12259         return ret;
12260     }
12261 #endif
12262 #ifdef TARGET_NR_clock_gettime64
12263     case TARGET_NR_clock_gettime64:
12264     {
12265         struct timespec ts;
12266         ret = get_errno(clock_gettime(arg1, &ts));
12267         if (!is_error(ret)) {
12268             ret = host_to_target_timespec64(arg2, &ts);
12269         }
12270         return ret;
12271     }
12272 #endif
12273 #ifdef TARGET_NR_clock_getres
12274     case TARGET_NR_clock_getres:
12275     {
12276         struct timespec ts;
12277         ret = get_errno(clock_getres(arg1, &ts));
12278         if (!is_error(ret)) {
12279             if (arg2 && host_to_target_timespec(arg2, &ts)) {
                      return -TARGET_EFAULT;
                  }
12280         }
12281         return ret;
12282     }
12283 #endif
12284 #ifdef TARGET_NR_clock_getres_time64
12285     case TARGET_NR_clock_getres_time64:
12286     {
12287         struct timespec ts;
12288         ret = get_errno(clock_getres(arg1, &ts));
12289         if (!is_error(ret)) {
12290             if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                      return -TARGET_EFAULT;
                  }
12291         }
12292         return ret;
12293     }
12294 #endif
12295 #ifdef TARGET_NR_clock_nanosleep
12296     case TARGET_NR_clock_nanosleep:
12297     {
12298         struct timespec ts;
12299         if (target_to_host_timespec(&ts, arg3)) {
12300             return -TARGET_EFAULT;
12301         }
12302         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12303                                              &ts, arg4 ? &ts : NULL));
12304         /*
12305          * If the call is interrupted by a signal handler, it fails with
12306          * -TARGET_EINTR.  If arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12307          * the remaining unslept time is written back to arg4.
12308          */
12309         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12310             host_to_target_timespec(arg4, &ts)) {
12311               return -TARGET_EFAULT;
12312         }
12313 
12314         return ret;
12315     }
12316 #endif
12317 #ifdef TARGET_NR_clock_nanosleep_time64
12318     case TARGET_NR_clock_nanosleep_time64:
12319     {
12320         struct timespec ts;
12321 
12322         if (target_to_host_timespec64(&ts, arg3)) {
12323             return -TARGET_EFAULT;
12324         }
12325 
12326         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12327                                              &ts, arg4 ? &ts : NULL));
12328 
12329         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12330             host_to_target_timespec64(arg4, &ts)) {
12331             return -TARGET_EFAULT;
12332         }
12333         return ret;
12334     }
12335 #endif
12336 
12337 #if defined(TARGET_NR_set_tid_address)
12338     case TARGET_NR_set_tid_address:
12339     {
12340         TaskState *ts = cpu->opaque;
12341         ts->child_tidptr = arg1;
12342         /* Do not call the host set_tid_address() syscall; just return the tid. */
12343         return get_errno(sys_gettid());
12344     }
12345 #endif
12346 
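    /*
     * tkill/tgkill: thread and thread-group ids are passed through
     * unchanged; only the signal number is translated from the guest's
     * numbering to the host's before the call is forwarded.
     */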
12347     case TARGET_NR_tkill:
12348         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12349 
12350     case TARGET_NR_tgkill:
12351         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12352                          target_to_host_signal(arg3)));
12353 
12354 #ifdef TARGET_NR_set_robust_list
12355     case TARGET_NR_set_robust_list:
12356     case TARGET_NR_get_robust_list:
12357         /* The ABI for supporting robust futexes has userspace pass
12358          * the kernel a pointer to a linked list which is updated by
12359          * userspace after the syscall; the list is walked by the kernel
12360          * when the thread exits. Since the linked list in QEMU guest
12361          * memory isn't a valid linked list for the host and we have
12362          * no way to reliably intercept the thread-death event, we can't
12363          * support these. Silently return ENOSYS so that guest userspace
12364          * falls back to a non-robust futex implementation (which should
12365          * be OK except in the corner case of the guest crashing while
12366          * holding a mutex that is shared with another process via
12367          * shared memory).
12368          */
12369         return -TARGET_ENOSYS;
12370 #endif
12371 
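/*
 * utimensat: when non-zero, arg3 points to an array of two guest
 * timespecs (access time, then modification time); a zero arg3 asks the
 * kernel to set both to the current time, and a zero arg2 applies the
 * times to the file referred to by the arg1 descriptor.  Illustrative
 * guest-side call (an assumption about typical usage, not from this file):
 *     struct timespec times[2] = { { 0, UTIME_NOW }, { 0, UTIME_OMIT } };
 *     utimensat(AT_FDCWD, "somefile", times, 0);
 */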
12372 #if defined(TARGET_NR_utimensat)
12373     case TARGET_NR_utimensat:
12374         {
12375             struct timespec *tsp, ts[2];
12376             if (!arg3) {
12377                 tsp = NULL;
12378             } else {
12379                 if (target_to_host_timespec(ts, arg3)) {
12380                     return -TARGET_EFAULT;
12381                 }
12382                 if (target_to_host_timespec(ts + 1, arg3 +
12383                                             sizeof(struct target_timespec))) {
12384                     return -TARGET_EFAULT;
12385                 }
12386                 tsp = ts;
12387             }
12388             if (!arg2) {
12389                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12390             } else {
12391                 if (!(p = lock_user_string(arg2))) {
12392                     return -TARGET_EFAULT;
12393                 }
12394                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12395                 unlock_user(p, arg2, 0);
12396             }
12397         }
12398         return ret;
12399 #endif
12400 #ifdef TARGET_NR_utimensat_time64
12401     case TARGET_NR_utimensat_time64:
12402         {
12403             struct timespec *tsp, ts[2];
12404             if (!arg3) {
12405                 tsp = NULL;
12406             } else {
12407                 if (target_to_host_timespec64(ts, arg3)) {
12408                     return -TARGET_EFAULT;
12409                 }
12410                 if (target_to_host_timespec64(ts + 1, arg3 +
12411                                      sizeof(struct target__kernel_timespec))) {
12412                     return -TARGET_EFAULT;
12413                 }
12414                 tsp = ts;
12415             }
12416             if (!arg2) {
12417                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12418             } else {
12419                 p = lock_user_string(arg2);
12420                 if (!p) {
12421                     return -TARGET_EFAULT;
12422                 }
12423                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12424                 unlock_user(p, arg2, 0);
12425             }
12426         }
12427         return ret;
12428 #endif
12429 #ifdef TARGET_NR_futex
12430     case TARGET_NR_futex:
12431         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12432 #endif
12433 #ifdef TARGET_NR_futex_time64
12434     case TARGET_NR_futex_time64:
12435         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12436 #endif
12437 #ifdef CONFIG_INOTIFY
12438 #if defined(TARGET_NR_inotify_init)
12439     case TARGET_NR_inotify_init:
12440         ret = get_errno(inotify_init());
12441         if (ret >= 0) {
12442             fd_trans_register(ret, &target_inotify_trans);
12443         }
12444         return ret;
12445 #endif
12446 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12447     case TARGET_NR_inotify_init1:
12448         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12449                                           fcntl_flags_tbl)));
12450         if (ret >= 0) {
12451             fd_trans_register(ret, &target_inotify_trans);
12452         }
12453         return ret;
12454 #endif
12455 #if defined(TARGET_NR_inotify_add_watch)
12456     case TARGET_NR_inotify_add_watch:
12457         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12458         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12459         unlock_user(p, arg2, 0);
12460         return ret;
12461 #endif
12462 #if defined(TARGET_NR_inotify_rm_watch)
12463     case TARGET_NR_inotify_rm_watch:
12464         return get_errno(inotify_rm_watch(arg1, arg2));
12465 #endif
12466 #endif
12467 
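/*
 * POSIX message queues: the queue name and optional mq_attr are copied in
 * from guest memory, the open flags are translated via fcntl_flags_tbl,
 * and the timed send/receive variants convert the guest timespec (32- or
 * 64-bit layout) before calling the safe_* wrappers.  Illustrative guest
 * usage (an assumption, not from this file):
 *     mqd_t q = mq_open("/queue", O_CREAT | O_RDWR, 0600, NULL);
 */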
12468 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12469     case TARGET_NR_mq_open:
12470         {
12471             struct mq_attr posix_mq_attr;
12472             struct mq_attr *pposix_mq_attr;
12473             int host_flags;
12474 
12475             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12476             pposix_mq_attr = NULL;
12477             if (arg4) {
12478                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12479                     return -TARGET_EFAULT;
12480                 }
12481                 pposix_mq_attr = &posix_mq_attr;
12482             }
12483             p = lock_user_string(arg1 - 1);
12484             if (!p) {
12485                 return -TARGET_EFAULT;
12486             }
12487             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12488             unlock_user(p, arg1, 0);
12489         }
12490         return ret;
12491 
12492     case TARGET_NR_mq_unlink:
12493         p = lock_user_string(arg1 - 1);
12494         if (!p) {
12495             return -TARGET_EFAULT;
12496         }
12497         ret = get_errno(mq_unlink(p));
12498         unlock_user(p, arg1, 0);
12499         return ret;
12500 
12501 #ifdef TARGET_NR_mq_timedsend
12502     case TARGET_NR_mq_timedsend:
12503         {
12504             struct timespec ts;
12505 
12506             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12507             if (arg5 != 0) {
12508                 if (target_to_host_timespec(&ts, arg5)) {
12509                     return -TARGET_EFAULT;
12510                 }
12511                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12512                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12513                     return -TARGET_EFAULT;
12514                 }
12515             } else {
12516                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12517             }
12518             unlock_user(p, arg2, arg3);
12519         }
12520         return ret;
12521 #endif
12522 #ifdef TARGET_NR_mq_timedsend_time64
12523     case TARGET_NR_mq_timedsend_time64:
12524         {
12525             struct timespec ts;
12526 
12527             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12528             if (arg5 != 0) {
12529                 if (target_to_host_timespec64(&ts, arg5)) {
12530                     return -TARGET_EFAULT;
12531                 }
12532                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12533                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536             } else {
12537                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12538             }
12539             unlock_user(p, arg2, arg3);
12540         }
12541         return ret;
12542 #endif
12543 
12544 #ifdef TARGET_NR_mq_timedreceive
12545     case TARGET_NR_mq_timedreceive:
12546         {
12547             struct timespec ts;
12548             unsigned int prio;
12549 
12550             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12551             if (arg5 != 0) {
12552                 if (target_to_host_timespec(&ts, arg5)) {
12553                     return -TARGET_EFAULT;
12554                 }
12555                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12556                                                      &prio, &ts));
12557                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12558                     return -TARGET_EFAULT;
12559                 }
12560             } else {
12561                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12562                                                      &prio, NULL));
12563             }
12564             unlock_user(p, arg2, arg3);
12565             if (arg4 != 0) {
12566                 put_user_u32(prio, arg4);
                  }
12567         }
12568         return ret;
12569 #endif
12570 #ifdef TARGET_NR_mq_timedreceive_time64
12571     case TARGET_NR_mq_timedreceive_time64:
12572         {
12573             struct timespec ts;
12574             unsigned int prio;
12575 
12576             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12577             if (arg5 != 0) {
12578                 if (target_to_host_timespec64(&ts, arg5)) {
12579                     return -TARGET_EFAULT;
12580                 }
12581                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12582                                                      &prio, &ts));
12583                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12584                     return -TARGET_EFAULT;
12585                 }
12586             } else {
12587                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12588                                                      &prio, NULL));
12589             }
12590             unlock_user(p, arg2, arg3);
12591             if (arg4 != 0) {
12592                 put_user_u32(prio, arg4);
12593             }
12594         }
12595         return ret;
12596 #endif
12597 
12598     /* Not implemented for now... */
12599 /*     case TARGET_NR_mq_notify: */
12600 /*         break; */
12601 
12602     case TARGET_NR_mq_getsetattr:
12603         {
12604             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12605             ret = 0;
12606             if (arg2 != 0) {
12607                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12608                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12609                                            &posix_mq_attr_out));
12610             } else if (arg3 != 0) {
12611                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12612             }
12613             if (ret == 0 && arg3 != 0) {
12614                 if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                          return -TARGET_EFAULT;
                      }
12615             }
12616         }
12617         return ret;
12618 #endif
12619 
12620 #ifdef CONFIG_SPLICE
12621 #ifdef TARGET_NR_tee
12622     case TARGET_NR_tee:
12623         {
12624             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12625         }
12626         return ret;
12627 #endif
12628 #ifdef TARGET_NR_splice
12629     case TARGET_NR_splice:
12630         {
12631             loff_t loff_in, loff_out;
12632             loff_t *ploff_in = NULL, *ploff_out = NULL;
12633             if (arg2) {
12634                 if (get_user_u64(loff_in, arg2)) {
12635                     return -TARGET_EFAULT;
12636                 }
12637                 ploff_in = &loff_in;
12638             }
12639             if (arg4) {
12640                 if (get_user_u64(loff_out, arg4)) {
12641                     return -TARGET_EFAULT;
12642                 }
12643                 ploff_out = &loff_out;
12644             }
12645             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12646             if (arg2) {
12647                 if (put_user_u64(loff_in, arg2)) {
12648                     return -TARGET_EFAULT;
12649                 }
12650             }
12651             if (arg4) {
12652                 if (put_user_u64(loff_out, arg4)) {
12653                     return -TARGET_EFAULT;
12654                 }
12655             }
12656         }
12657         return ret;
12658 #endif
12659 #ifdef TARGET_NR_vmsplice
12660     case TARGET_NR_vmsplice:
12661         {
12662             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12663             if (vec != NULL) {
12664                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12665                 unlock_iovec(vec, arg2, arg3, 0);
12666             } else {
12667                 ret = -host_to_target_errno(errno);
12668             }
12669         }
12670         return ret;
12671 #endif
12672 #endif /* CONFIG_SPLICE */
12673 #ifdef CONFIG_EVENTFD
12674 #if defined(TARGET_NR_eventfd)
12675     case TARGET_NR_eventfd:
12676         ret = get_errno(eventfd(arg1, 0));
12677         if (ret >= 0) {
12678             fd_trans_register(ret, &target_eventfd_trans);
12679         }
12680         return ret;
12681 #endif
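/*
 * eventfd2: the guest's O_NONBLOCK and O_CLOEXEC bit values may differ
 * from the host's, so the flags are rebuilt bit by bit instead of being
 * passed through unchanged.
 */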
12682 #if defined(TARGET_NR_eventfd2)
12683     case TARGET_NR_eventfd2:
12684     {
12685         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12686         if (arg2 & TARGET_O_NONBLOCK) {
12687             host_flags |= O_NONBLOCK;
12688         }
12689         if (arg2 & TARGET_O_CLOEXEC) {
12690             host_flags |= O_CLOEXEC;
12691         }
12692         ret = get_errno(eventfd(arg1, host_flags));
12693         if (ret >= 0) {
12694             fd_trans_register(ret, &target_eventfd_trans);
12695         }
12696         return ret;
12697     }
12698 #endif
12699 #endif /* CONFIG_EVENTFD  */
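/*
 * fallocate/sync_file_range: on 32-bit guest ABIs the 64-bit offset and
 * length arguments arrive split across register pairs and are reassembled
 * with target_offset64(); the MIPS o32 variant reads them from later
 * argument slots, presumably because 64-bit arguments are aligned to even
 * register pairs on that ABI.
 */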
12700 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12701     case TARGET_NR_fallocate:
12702 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12703         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12704                                   target_offset64(arg5, arg6)));
12705 #else
12706         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12707 #endif
12708         return ret;
12709 #endif
12710 #if defined(CONFIG_SYNC_FILE_RANGE)
12711 #if defined(TARGET_NR_sync_file_range)
12712     case TARGET_NR_sync_file_range:
12713 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12714 #if defined(TARGET_MIPS)
12715         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12716                                         target_offset64(arg5, arg6), arg7));
12717 #else
12718         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12719                                         target_offset64(arg4, arg5), arg6));
12720 #endif /* !TARGET_MIPS */
12721 #else
12722         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12723 #endif
12724         return ret;
12725 #endif
12726 #if defined(TARGET_NR_sync_file_range2) || \
12727     defined(TARGET_NR_arm_sync_file_range)
12728 #if defined(TARGET_NR_sync_file_range2)
12729     case TARGET_NR_sync_file_range2:
12730 #endif
12731 #if defined(TARGET_NR_arm_sync_file_range)
12732     case TARGET_NR_arm_sync_file_range:
12733 #endif
12734         /* This is like sync_file_range but the arguments are reordered */
12735 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12736         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12737                                         target_offset64(arg5, arg6), arg2));
12738 #else
12739         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12740 #endif
12741         return ret;
12742 #endif
12743 #endif
12744 #if defined(TARGET_NR_signalfd4)
12745     case TARGET_NR_signalfd4:
12746         return do_signalfd4(arg1, arg2, arg4);
12747 #endif
12748 #if defined(TARGET_NR_signalfd)
12749     case TARGET_NR_signalfd:
12750         return do_signalfd4(arg1, arg2, 0);
12751 #endif
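/*
 * epoll: events are exchanged through a temporary host-side array because
 * struct target_epoll_event and the host's struct epoll_event can differ
 * in packing and byte order; each returned entry is byte-swapped
 * individually before being copied back to the guest.
 */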
12752 #if defined(CONFIG_EPOLL)
12753 #if defined(TARGET_NR_epoll_create)
12754     case TARGET_NR_epoll_create:
12755         return get_errno(epoll_create(arg1));
12756 #endif
12757 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12758     case TARGET_NR_epoll_create1:
12759         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12760 #endif
12761 #if defined(TARGET_NR_epoll_ctl)
12762     case TARGET_NR_epoll_ctl:
12763     {
12764         struct epoll_event ep;
12765         struct epoll_event *epp = 0;
12766         if (arg4) {
12767             if (arg2 != EPOLL_CTL_DEL) {
12768                 struct target_epoll_event *target_ep;
12769                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12770                     return -TARGET_EFAULT;
12771                 }
12772                 ep.events = tswap32(target_ep->events);
12773                 /*
12774                  * The epoll_data_t union is just opaque data to the kernel,
12775                  * so we transfer all 64 bits across and need not worry what
12776                  * actual data type it is.
12777                  */
12778                 ep.data.u64 = tswap64(target_ep->data.u64);
12779                 unlock_user_struct(target_ep, arg4, 0);
12780             }
12781             /*
12782              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12783              * non-null pointer, even though this argument is ignored.
12784              */
12786             epp = &ep;
12787         }
12788         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12789     }
12790 #endif
12791 
12792 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12793 #if defined(TARGET_NR_epoll_wait)
12794     case TARGET_NR_epoll_wait:
12795 #endif
12796 #if defined(TARGET_NR_epoll_pwait)
12797     case TARGET_NR_epoll_pwait:
12798 #endif
12799     {
12800         struct target_epoll_event *target_ep;
12801         struct epoll_event *ep;
12802         int epfd = arg1;
12803         int maxevents = arg3;
12804         int timeout = arg4;
12805 
12806         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12807             return -TARGET_EINVAL;
12808         }
12809 
12810         target_ep = lock_user(VERIFY_WRITE, arg2,
12811                               maxevents * sizeof(struct target_epoll_event), 1);
12812         if (!target_ep) {
12813             return -TARGET_EFAULT;
12814         }
12815 
12816         ep = g_try_new(struct epoll_event, maxevents);
12817         if (!ep) {
12818             unlock_user(target_ep, arg2, 0);
12819             return -TARGET_ENOMEM;
12820         }
12821 
12822         switch (num) {
12823 #if defined(TARGET_NR_epoll_pwait)
12824         case TARGET_NR_epoll_pwait:
12825         {
12826             sigset_t *set = NULL;
12827 
12828             if (arg5) {
12829                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12830                 if (ret != 0) {
12831                     break;
12832                 }
12833             }
12834 
12835             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12836                                              set, SIGSET_T_SIZE));
12837 
12838             if (set) {
12839                 finish_sigsuspend_mask(ret);
12840             }
12841             break;
12842         }
12843 #endif
12844 #if defined(TARGET_NR_epoll_wait)
12845         case TARGET_NR_epoll_wait:
12846             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12847                                              NULL, 0));
12848             break;
12849 #endif
12850         default:
12851             ret = -TARGET_ENOSYS;
12852         }
12853         if (!is_error(ret)) {
12854             int i;
12855             for (i = 0; i < ret; i++) {
12856                 target_ep[i].events = tswap32(ep[i].events);
12857                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12858             }
12859             unlock_user(target_ep, arg2,
12860                         ret * sizeof(struct target_epoll_event));
12861         } else {
12862             unlock_user(target_ep, arg2, 0);
12863         }
12864         g_free(ep);
12865         return ret;
12866     }
12867 #endif
12868 #endif
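/*
 * prlimit64: new limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
 * not forwarded to the host, presumably because they would constrain the
 * QEMU process itself rather than just the guest; reading the current
 * limits still works for every resource.
 */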
12869 #ifdef TARGET_NR_prlimit64
12870     case TARGET_NR_prlimit64:
12871     {
12872         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12873         struct target_rlimit64 *target_rnew, *target_rold;
12874         struct host_rlimit64 rnew, rold, *rnewp = 0;
12875         int resource = target_to_host_resource(arg2);
12876 
12877         if (arg3 && (resource != RLIMIT_AS &&
12878                      resource != RLIMIT_DATA &&
12879                      resource != RLIMIT_STACK)) {
12880             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12881                 return -TARGET_EFAULT;
12882             }
12883             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
12884             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
12885             unlock_user_struct(target_rnew, arg3, 0);
12886             rnewp = &rnew;
12887         }
12888 
12889         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12890         if (!is_error(ret) && arg4) {
12891             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12892                 return -TARGET_EFAULT;
12893             }
12894             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
12895             __put_user(rold.rlim_max, &target_rold->rlim_max);
12896             unlock_user_struct(target_rold, arg4, 1);
12897         }
12898         return ret;
12899     }
12900 #endif
12901 #ifdef TARGET_NR_gethostname
12902     case TARGET_NR_gethostname:
12903     {
12904         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12905         if (name) {
12906             ret = get_errno(gethostname(name, arg2));
12907             unlock_user(name, arg1, arg2);
12908         } else {
12909             ret = -TARGET_EFAULT;
12910         }
12911         return ret;
12912     }
12913 #endif
12914 #ifdef TARGET_NR_atomic_cmpxchg_32
12915     case TARGET_NR_atomic_cmpxchg_32:
12916     {
12917         /* should use start_exclusive from main.c */
12918         abi_ulong mem_value;
12919         if (get_user_u32(mem_value, arg6)) {
12920             target_siginfo_t info;
12921             info.si_signo = SIGSEGV;
12922             info.si_errno = 0;
12923             info.si_code = TARGET_SEGV_MAPERR;
12924             info._sifields._sigfault._addr = arg6;
12925             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12926             ret = 0xdeadbeef;
12927 
12928         }
12929         if (mem_value == arg2) {
12930             put_user_u32(arg1, arg6);
              }
12931         return mem_value;
12932     }
12933 #endif
12934 #ifdef TARGET_NR_atomic_barrier
12935     case TARGET_NR_atomic_barrier:
12936         /* Like the kernel implementation and the qemu arm
12937            barrier, treat this as a no-op. */
12938         return 0;
12939 #endif
12940 
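/*
 * POSIX timers: host timer_t handles live in the g_posix_timers[] slot
 * array.  The id handed back to the guest is TIMER_MAGIC OR'ed with the
 * slot index, and get_timer_id() checks and strips that encoding again on
 * every subsequent timer_* call.
 */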
12941 #ifdef TARGET_NR_timer_create
12942     case TARGET_NR_timer_create:
12943     {
12944         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12945 
12946         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12947 
12948         int clkid = arg1;
12949         int timer_index = next_free_host_timer();
12950 
12951         if (timer_index < 0) {
12952             ret = -TARGET_EAGAIN;
12953         } else {
12954             timer_t *phtimer = g_posix_timers  + timer_index;
12955 
12956             if (arg2) {
12957                 phost_sevp = &host_sevp;
12958                 ret = target_to_host_sigevent(phost_sevp, arg2);
12959                 if (ret != 0) {
12960                     free_host_timer_slot(timer_index);
12961                     return ret;
12962                 }
12963             }
12964 
12965             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12966             if (ret) {
12967                 free_host_timer_slot(timer_index);
12968             } else {
12969                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12970                     timer_delete(*phtimer);
12971                     free_host_timer_slot(timer_index);
12972                     return -TARGET_EFAULT;
12973                 }
12974             }
12975         }
12976         return ret;
12977     }
12978 #endif
12979 
12980 #ifdef TARGET_NR_timer_settime
12981     case TARGET_NR_timer_settime:
12982     {
12983         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12984          * struct itimerspec * old_value */
12985         target_timer_t timerid = get_timer_id(arg1);
12986 
12987         if (timerid < 0) {
12988             ret = timerid;
12989         } else if (arg3 == 0) {
12990             ret = -TARGET_EINVAL;
12991         } else {
12992             timer_t htimer = g_posix_timers[timerid];
12993             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12994 
12995             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12996                 return -TARGET_EFAULT;
12997             }
12998             ret = get_errno(
12999                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13000             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13001                 return -TARGET_EFAULT;
13002             }
13003         }
13004         return ret;
13005     }
13006 #endif
13007 
13008 #ifdef TARGET_NR_timer_settime64
13009     case TARGET_NR_timer_settime64:
13010     {
13011         target_timer_t timerid = get_timer_id(arg1);
13012 
13013         if (timerid < 0) {
13014             ret = timerid;
13015         } else if (arg3 == 0) {
13016             ret = -TARGET_EINVAL;
13017         } else {
13018             timer_t htimer = g_posix_timers[timerid];
13019             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13020 
13021             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13022                 return -TARGET_EFAULT;
13023             }
13024             ret = get_errno(
13025                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13026             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13027                 return -TARGET_EFAULT;
13028             }
13029         }
13030         return ret;
13031     }
13032 #endif
13033 
13034 #ifdef TARGET_NR_timer_gettime
13035     case TARGET_NR_timer_gettime:
13036     {
13037         /* args: timer_t timerid, struct itimerspec *curr_value */
13038         target_timer_t timerid = get_timer_id(arg1);
13039 
13040         if (timerid < 0) {
13041             ret = timerid;
13042         } else if (!arg2) {
13043             ret = -TARGET_EFAULT;
13044         } else {
13045             timer_t htimer = g_posix_timers[timerid];
13046             struct itimerspec hspec;
13047             ret = get_errno(timer_gettime(htimer, &hspec));
13048 
13049             if (host_to_target_itimerspec(arg2, &hspec)) {
13050                 ret = -TARGET_EFAULT;
13051             }
13052         }
13053         return ret;
13054     }
13055 #endif
13056 
13057 #ifdef TARGET_NR_timer_gettime64
13058     case TARGET_NR_timer_gettime64:
13059     {
13060         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13061         target_timer_t timerid = get_timer_id(arg1);
13062 
13063         if (timerid < 0) {
13064             ret = timerid;
13065         } else if (!arg2) {
13066             ret = -TARGET_EFAULT;
13067         } else {
13068             timer_t htimer = g_posix_timers[timerid];
13069             struct itimerspec hspec;
13070             ret = get_errno(timer_gettime(htimer, &hspec));
13071 
13072             if (host_to_target_itimerspec64(arg2, &hspec)) {
13073                 ret = -TARGET_EFAULT;
13074             }
13075         }
13076         return ret;
13077     }
13078 #endif
13079 
13080 #ifdef TARGET_NR_timer_getoverrun
13081     case TARGET_NR_timer_getoverrun:
13082     {
13083         /* args: timer_t timerid */
13084         target_timer_t timerid = get_timer_id(arg1);
13085 
13086         if (timerid < 0) {
13087             ret = timerid;
13088         } else {
13089             timer_t htimer = g_posix_timers[timerid];
13090             ret = get_errno(timer_getoverrun(htimer));
13091         }
13092         return ret;
13093     }
13094 #endif
13095 
13096 #ifdef TARGET_NR_timer_delete
13097     case TARGET_NR_timer_delete:
13098     {
13099         /* args: timer_t timerid */
13100         target_timer_t timerid = get_timer_id(arg1);
13101 
13102         if (timerid < 0) {
13103             ret = timerid;
13104         } else {
13105             timer_t htimer = g_posix_timers[timerid];
13106             ret = get_errno(timer_delete(htimer));
13107             free_host_timer_slot(timerid);
13108         }
13109         return ret;
13110     }
13111 #endif
13112 
13113 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13114     case TARGET_NR_timerfd_create:
13115         ret = get_errno(timerfd_create(arg1,
13116                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13117         if (ret >= 0) {
13118             fd_trans_register(ret, &target_timerfd_trans);
13119         }
13120         return ret;
13121 #endif
13122 
13123 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13124     case TARGET_NR_timerfd_gettime:
13125         {
13126             struct itimerspec its_curr;
13127 
13128             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13129 
13130             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13131                 return -TARGET_EFAULT;
13132             }
13133         }
13134         return ret;
13135 #endif
13136 
13137 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13138     case TARGET_NR_timerfd_gettime64:
13139         {
13140             struct itimerspec its_curr;
13141 
13142             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13143 
13144             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13145                 return -TARGET_EFAULT;
13146             }
13147         }
13148         return ret;
13149 #endif
13150 
13151 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13152     case TARGET_NR_timerfd_settime:
13153         {
13154             struct itimerspec its_new, its_old, *p_new;
13155 
13156             if (arg3) {
13157                 if (target_to_host_itimerspec(&its_new, arg3)) {
13158                     return -TARGET_EFAULT;
13159                 }
13160                 p_new = &its_new;
13161             } else {
13162                 p_new = NULL;
13163             }
13164 
13165             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13166 
13167             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13168                 return -TARGET_EFAULT;
13169             }
13170         }
13171         return ret;
13172 #endif
13173 
13174 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13175     case TARGET_NR_timerfd_settime64:
13176         {
13177             struct itimerspec its_new, its_old, *p_new;
13178 
13179             if (arg3) {
13180                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13181                     return -TARGET_EFAULT;
13182                 }
13183                 p_new = &its_new;
13184             } else {
13185                 p_new = NULL;
13186             }
13187 
13188             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13189 
13190             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13191                 return -TARGET_EFAULT;
13192             }
13193         }
13194         return ret;
13195 #endif
13196 
13197 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13198     case TARGET_NR_ioprio_get:
13199         return get_errno(ioprio_get(arg1, arg2));
13200 #endif
13201 
13202 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13203     case TARGET_NR_ioprio_set:
13204         return get_errno(ioprio_set(arg1, arg2, arg3));
13205 #endif
13206 
13207 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13208     case TARGET_NR_setns:
13209         return get_errno(setns(arg1, arg2));
13210 #endif
13211 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13212     case TARGET_NR_unshare:
13213         return get_errno(unshare(arg1));
13214 #endif
13215 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13216     case TARGET_NR_kcmp:
13217         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13218 #endif
13219 #ifdef TARGET_NR_swapcontext
13220     case TARGET_NR_swapcontext:
13221         /* PowerPC specific.  */
13222         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13223 #endif
13224 #ifdef TARGET_NR_memfd_create
13225     case TARGET_NR_memfd_create:
13226         p = lock_user_string(arg1);
13227         if (!p) {
13228             return -TARGET_EFAULT;
13229         }
13230         ret = get_errno(memfd_create(p, arg2));
13231         fd_trans_unregister(ret);
13232         unlock_user(p, arg1, 0);
13233         return ret;
13234 #endif
13235 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13236     case TARGET_NR_membarrier:
13237         return get_errno(membarrier(arg1, arg2));
13238 #endif
13239 
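/*
 * copy_file_range: the optional input/output offsets are read from and
 * written back to guest memory as 64-bit values, and the count is passed
 * as an unsigned abi_ulong so that it is not sign-extended on 32-bit
 * guests.
 */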
13240 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13241     case TARGET_NR_copy_file_range:
13242         {
13243             loff_t inoff, outoff;
13244             loff_t *pinoff = NULL, *poutoff = NULL;
13245 
13246             if (arg2) {
13247                 if (get_user_u64(inoff, arg2)) {
13248                     return -TARGET_EFAULT;
13249                 }
13250                 pinoff = &inoff;
13251             }
13252             if (arg4) {
13253                 if (get_user_u64(outoff, arg4)) {
13254                     return -TARGET_EFAULT;
13255                 }
13256                 poutoff = &outoff;
13257             }
13258             /* Do not sign-extend the count parameter. */
13259             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13260                                                  (abi_ulong)arg5, arg6));
13261             if (!is_error(ret) && ret > 0) {
13262                 if (arg2) {
13263                     if (put_user_u64(inoff, arg2)) {
13264                         return -TARGET_EFAULT;
13265                     }
13266                 }
13267                 if (arg4) {
13268                     if (put_user_u64(outoff, arg4)) {
13269                         return -TARGET_EFAULT;
13270                     }
13271                 }
13272             }
13273         }
13274         return ret;
13275 #endif
13276 
13277 #if defined(TARGET_NR_pivot_root)
13278     case TARGET_NR_pivot_root:
13279         {
13280             void *p2;
13281             p = lock_user_string(arg1); /* new_root */
13282             p2 = lock_user_string(arg2); /* put_old */
13283             if (!p || !p2) {
13284                 ret = -TARGET_EFAULT;
13285             } else {
13286                 ret = get_errno(pivot_root(p, p2));
13287             }
13288             unlock_user(p2, arg2, 0);
13289             unlock_user(p, arg1, 0);
13290         }
13291         return ret;
13292 #endif
13293 
13294     default:
13295         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13296         return -TARGET_ENOSYS;
13297     }
13298     return ret;
13299 }
13300 
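/*
 * do_syscall() is the entry point used by the per-target cpu loops: it
 * wraps do_syscall1() with record/replay hooks and optional strace-style
 * logging of the call and its return value.
 */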
13301 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13302                     abi_long arg2, abi_long arg3, abi_long arg4,
13303                     abi_long arg5, abi_long arg6, abi_long arg7,
13304                     abi_long arg8)
13305 {
13306     CPUState *cpu = env_cpu(cpu_env);
13307     abi_long ret;
13308 
13309 #ifdef DEBUG_ERESTARTSYS
13310     /* Debug-only code for exercising the syscall-restart code paths
13311      * in the per-architecture cpu main loops: restart every syscall
13312      * the guest makes once before letting it through.
13313      */
13314     {
13315         static bool flag;
13316         flag = !flag;
13317         if (flag) {
13318             return -QEMU_ERESTARTSYS;
13319         }
13320     }
13321 #endif
13322 
13323     record_syscall_start(cpu, num, arg1,
13324                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13325 
13326     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13327         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13328     }
13329 
13330     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13331                       arg5, arg6, arg7, arg8);
13332 
13333     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13334         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13335                           arg3, arg4, arg5, arg6);
13336     }
13337 
13338     record_syscall_return(cpu, num, ret);
13339     return ret;
13340 }
13341