xref: /openbmc/qemu/linux-user/syscall.c (revision e0174afe)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 #include "cpu_loop-common.h"
144 
145 #ifndef CLONE_IO
146 #define CLONE_IO                0x80000000      /* Clone io context */
147 #endif
148 
149 /* We can't directly call the host clone syscall, because this will
150  * badly confuse libc (breaking mutexes, for example). So we must
151  * divide clone flags into:
152  *  * flag combinations that look like pthread_create()
153  *  * flag combinations that look like fork()
154  *  * flags we can implement within QEMU itself
155  *  * flags we can't support and will return an error for
156  */
157 /* For thread creation, all these flags must be present; for
158  * fork, none must be present.
159  */
160 #define CLONE_THREAD_FLAGS                              \
161     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
162      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
163 
164 /* These flags are ignored:
165  * CLONE_DETACHED is now ignored by the kernel;
166  * CLONE_IO is just an optimisation hint to the I/O scheduler
167  */
168 #define CLONE_IGNORED_FLAGS                     \
169     (CLONE_DETACHED | CLONE_IO)
170 
171 /* Flags for fork which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_FORK_FLAGS               \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
175 
176 /* Flags for thread creation which we can implement within QEMU itself */
177 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
178     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
179      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
180 
181 #define CLONE_INVALID_FORK_FLAGS                                        \
182     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
183 
184 #define CLONE_INVALID_THREAD_FLAGS                                      \
185     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
186        CLONE_IGNORED_FLAGS))
187 
188 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
189  * have almost all been allocated. We cannot support any of
190  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
191  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
192  * The checks against the invalid thread masks above will catch these.
193  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
194  */
195 
196 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
197  * once. This exercises the codepaths for restart.
198  */
199 //#define DEBUG_ERESTARTSYS
200 
201 //#include <linux/msdos_fs.h>
202 #define VFAT_IOCTL_READDIR_BOTH \
203     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
204 #define VFAT_IOCTL_READDIR_SHORT \
205     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
206 
207 #undef _syscall0
208 #undef _syscall1
209 #undef _syscall2
210 #undef _syscall3
211 #undef _syscall4
212 #undef _syscall5
213 #undef _syscall6
214 
215 #define _syscall0(type,name)		\
216 static type name (void)			\
217 {					\
218 	return syscall(__NR_##name);	\
219 }
220 
221 #define _syscall1(type,name,type1,arg1)		\
222 static type name (type1 arg1)			\
223 {						\
224 	return syscall(__NR_##name, arg1);	\
225 }
226 
227 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
228 static type name (type1 arg1,type2 arg2)		\
229 {							\
230 	return syscall(__NR_##name, arg1, arg2);	\
231 }
232 
233 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3)		\
235 {								\
236 	return syscall(__NR_##name, arg1, arg2, arg3);		\
237 }
238 
239 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
243 }
244 
245 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
246 		  type5,arg5)							\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
250 }
251 
252 
253 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
254 		  type5,arg5,type6,arg6)					\
255 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
256                   type6 arg6)							\
257 {										\
258 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
259 }
260 
261 
262 #define __NR_sys_uname __NR_uname
263 #define __NR_sys_getcwd1 __NR_getcwd
264 #define __NR_sys_getdents __NR_getdents
265 #define __NR_sys_getdents64 __NR_getdents64
266 #define __NR_sys_getpriority __NR_getpriority
267 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
268 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
269 #define __NR_sys_syslog __NR_syslog
270 #if defined(__NR_futex)
271 # define __NR_sys_futex __NR_futex
272 #endif
273 #if defined(__NR_futex_time64)
274 # define __NR_sys_futex_time64 __NR_futex_time64
275 #endif
276 #define __NR_sys_statx __NR_statx
277 
278 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
279 #define __NR__llseek __NR_lseek
280 #endif
281 
282 /* Newer kernel ports have llseek() instead of _llseek() */
283 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
284 #define TARGET_NR__llseek TARGET_NR_llseek
285 #endif
286 
287 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
288 #ifndef TARGET_O_NONBLOCK_MASK
289 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
290 #endif
291 
292 #define __NR_sys_gettid __NR_gettid
293 _syscall0(int, sys_gettid)
294 
295 /* For the 64-bit guest on 32-bit host case we must emulate
296  * getdents using getdents64, because otherwise the host
297  * might hand us back more dirent records than we can fit
298  * into the guest buffer after structure format conversion.
299  * Otherwise we emulate getdents with getdents if the host has it.
300  */
301 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
302 #define EMULATE_GETDENTS_WITH_GETDENTS
303 #endif
304 
305 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
306 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
307 #endif
308 #if (defined(TARGET_NR_getdents) && \
309       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
310     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
311 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
312 #endif
313 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
314 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
315           loff_t *, res, uint, wh);
316 #endif
317 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
318 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
319           siginfo_t *, uinfo)
320 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
321 #ifdef __NR_exit_group
322 _syscall1(int,exit_group,int,error_code)
323 #endif
324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
325 #define __NR_sys_close_range __NR_close_range
326 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
327 #ifndef CLOSE_RANGE_CLOEXEC
328 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
329 #endif
330 #endif
331 #if defined(__NR_futex)
332 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
333           const struct timespec *,timeout,int *,uaddr2,int,val3)
334 #endif
335 #if defined(__NR_futex_time64)
336 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
337           const struct timespec *,timeout,int *,uaddr2,int,val3)
338 #endif
339 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
340 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
341 #endif
342 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
343 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
344                              unsigned int, flags);
345 #endif
346 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
347 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
348 #endif
349 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
350 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
351           unsigned long *, user_mask_ptr);
352 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
353 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
354           unsigned long *, user_mask_ptr);
355 /* sched_attr is not defined in glibc */
356 struct sched_attr {
357     uint32_t size;
358     uint32_t sched_policy;
359     uint64_t sched_flags;
360     int32_t sched_nice;
361     uint32_t sched_priority;
362     uint64_t sched_runtime;
363     uint64_t sched_deadline;
364     uint64_t sched_period;
365     uint32_t sched_util_min;
366     uint32_t sched_util_max;
367 };
368 #define __NR_sys_sched_getattr __NR_sched_getattr
369 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
370           unsigned int, size, unsigned int, flags);
371 #define __NR_sys_sched_setattr __NR_sched_setattr
372 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
373           unsigned int, flags);
374 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
375 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
376 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
377 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
378           const struct sched_param *, param);
379 #define __NR_sys_sched_getparam __NR_sched_getparam
380 _syscall2(int, sys_sched_getparam, pid_t, pid,
381           struct sched_param *, param);
382 #define __NR_sys_sched_setparam __NR_sched_setparam
383 _syscall2(int, sys_sched_setparam, pid_t, pid,
384           const struct sched_param *, param);
385 #define __NR_sys_getcpu __NR_getcpu
386 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
387 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
388           void *, arg);
389 _syscall2(int, capget, struct __user_cap_header_struct *, header,
390           struct __user_cap_data_struct *, data);
391 _syscall2(int, capset, struct __user_cap_header_struct *, header,
392           struct __user_cap_data_struct *, data);
393 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
394 _syscall2(int, ioprio_get, int, which, int, who)
395 #endif
396 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
397 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
398 #endif
399 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
400 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
401 #endif
402 
403 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
404 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
405           unsigned long, idx1, unsigned long, idx2)
406 #endif
407 
408 /*
409  * It is assumed that struct statx is architecture independent.
410  */
411 #if defined(TARGET_NR_statx) && defined(__NR_statx)
412 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
413           unsigned int, mask, struct target_statx *, statxbuf)
414 #endif
415 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
416 _syscall2(int, membarrier, int, cmd, int, flags)
417 #endif
418 
419 static const bitmask_transtbl fcntl_flags_tbl[] = {
420   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
421   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
422   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
423   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
424   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
425   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
426   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
427   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
428   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
429   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
430   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
431   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
432   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
433 #if defined(O_DIRECT)
434   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
435 #endif
436 #if defined(O_NOATIME)
437   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
438 #endif
439 #if defined(O_CLOEXEC)
440   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
441 #endif
442 #if defined(O_PATH)
443   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
444 #endif
445 #if defined(O_TMPFILE)
446   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
447 #endif
448   /* Don't terminate the list prematurely on 64-bit host+guest.  */
449 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
450   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
451 #endif
452   { 0, 0, 0, 0 }
453 };
454 
455 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
456 
457 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
458 #if defined(__NR_utimensat)
459 #define __NR_sys_utimensat __NR_utimensat
460 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
461           const struct timespec *,tsp,int,flags)
462 #else
463 static int sys_utimensat(int dirfd, const char *pathname,
464                          const struct timespec times[2], int flags)
465 {
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_utimensat */
471 
472 #ifdef TARGET_NR_renameat2
473 #if defined(__NR_renameat2)
474 #define __NR_sys_renameat2 __NR_renameat2
475 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
476           const char *, new, unsigned int, flags)
477 #else
478 static int sys_renameat2(int oldfd, const char *old,
479                          int newfd, const char *new, int flags)
480 {
481     if (flags == 0) {
482         return renameat(oldfd, old, newfd, new);
483     }
484     errno = ENOSYS;
485     return -1;
486 }
487 #endif
488 #endif /* TARGET_NR_renameat2 */
489 
490 #ifdef CONFIG_INOTIFY
491 #include <sys/inotify.h>
492 #else
493 /* Userspace can usually survive runtime without inotify */
494 #undef TARGET_NR_inotify_init
495 #undef TARGET_NR_inotify_init1
496 #undef TARGET_NR_inotify_add_watch
497 #undef TARGET_NR_inotify_rm_watch
498 #endif /* CONFIG_INOTIFY  */
499 
500 #if defined(TARGET_NR_prlimit64)
501 #ifndef __NR_prlimit64
502 # define __NR_prlimit64 -1
503 #endif
504 #define __NR_sys_prlimit64 __NR_prlimit64
505 /* The glibc rlimit structure may not be that used by the underlying syscall */
506 struct host_rlimit64 {
507     uint64_t rlim_cur;
508     uint64_t rlim_max;
509 };
510 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
511           const struct host_rlimit64 *, new_limit,
512           struct host_rlimit64 *, old_limit)
513 #endif
514 
515 
516 #if defined(TARGET_NR_timer_create)
517 /* Maximum of 32 active POSIX timers allowed at any one time. */
518 #define GUEST_TIMER_MAX 32
519 static timer_t g_posix_timers[GUEST_TIMER_MAX];
520 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
521 
522 static inline int next_free_host_timer(void)
523 {
524     int k;
525     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
526         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
527             return k;
528         }
529     }
530     return -1;
531 }
532 
533 static inline void free_host_timer_slot(int id)
534 {
535     qatomic_store_release(g_posix_timer_allocated + id, 0);
536 }
537 #endif
538 
539 static inline int host_to_target_errno(int host_errno)
540 {
541     switch (host_errno) {
542 #define E(X)  case X: return TARGET_##X;
543 #include "errnos.c.inc"
544 #undef E
545     default:
546         return host_errno;
547     }
548 }
549 
550 static inline int target_to_host_errno(int target_errno)
551 {
552     switch (target_errno) {
553 #define E(X)  case TARGET_##X: return X;
554 #include "errnos.c.inc"
555 #undef E
556     default:
557         return target_errno;
558     }
559 }
560 
561 abi_long get_errno(abi_long ret)
562 {
563     if (ret == -1)
564         return -host_to_target_errno(errno);
565     else
566         return ret;
567 }
568 
569 const char *target_strerror(int err)
570 {
571     if (err == QEMU_ERESTARTSYS) {
572         return "To be restarted";
573     }
574     if (err == QEMU_ESIGRETURN) {
575         return "Successful exit from sigreturn";
576     }
577 
578     return strerror(target_to_host_errno(err));
579 }
580 
581 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
582 {
583     int i;
584     uint8_t b;
585     if (usize <= ksize) {
586         return 1;
587     }
588     for (i = ksize; i < usize; i++) {
589         if (get_user_u8(b, addr + i)) {
590             return -TARGET_EFAULT;
591         }
592         if (b != 0) {
593             return 0;
594         }
595     }
596     return 1;
597 }
598 
599 #define safe_syscall0(type, name) \
600 static type safe_##name(void) \
601 { \
602     return safe_syscall(__NR_##name); \
603 }
604 
605 #define safe_syscall1(type, name, type1, arg1) \
606 static type safe_##name(type1 arg1) \
607 { \
608     return safe_syscall(__NR_##name, arg1); \
609 }
610 
611 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
612 static type safe_##name(type1 arg1, type2 arg2) \
613 { \
614     return safe_syscall(__NR_##name, arg1, arg2); \
615 }
616 
617 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
618 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
621 }
622 
623 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
624     type4, arg4) \
625 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
626 { \
627     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
628 }
629 
630 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
631     type4, arg4, type5, arg5) \
632 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
633     type5 arg5) \
634 { \
635     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
636 }
637 
638 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
639     type4, arg4, type5, arg5, type6, arg6) \
640 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
641     type5 arg5, type6 arg6) \
642 { \
643     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
644 }
645 
646 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
647 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
648 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
649               int, flags, mode_t, mode)
650 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
651 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
652               struct rusage *, rusage)
653 #endif
654 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
655               int, options, struct rusage *, rusage)
656 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
657               char **, argv, char **, envp, int, flags)
658 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
659     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
660 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
661               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
662 #endif
663 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
664 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
665               struct timespec *, tsp, const sigset_t *, sigmask,
666               size_t, sigsetsize)
667 #endif
668 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
669               int, maxevents, int, timeout, const sigset_t *, sigmask,
670               size_t, sigsetsize)
671 #if defined(__NR_futex)
672 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
673               const struct timespec *,timeout,int *,uaddr2,int,val3)
674 #endif
675 #if defined(__NR_futex_time64)
676 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
677               const struct timespec *,timeout,int *,uaddr2,int,val3)
678 #endif
679 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
680 safe_syscall2(int, kill, pid_t, pid, int, sig)
681 safe_syscall2(int, tkill, int, tid, int, sig)
682 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
683 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
684 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
685 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
686               unsigned long, pos_l, unsigned long, pos_h)
687 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
688               unsigned long, pos_l, unsigned long, pos_h)
689 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
690               socklen_t, addrlen)
691 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
692               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
693 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
694               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
695 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
696 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
697 safe_syscall2(int, flock, int, fd, int, operation)
698 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
699 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
700               const struct timespec *, uts, size_t, sigsetsize)
701 #endif
702 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
703               int, flags)
704 #if defined(TARGET_NR_nanosleep)
705 safe_syscall2(int, nanosleep, const struct timespec *, req,
706               struct timespec *, rem)
707 #endif
708 #if defined(TARGET_NR_clock_nanosleep) || \
709     defined(TARGET_NR_clock_nanosleep_time64)
710 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
711               const struct timespec *, req, struct timespec *, rem)
712 #endif
713 #ifdef __NR_ipc
714 #ifdef __s390x__
715 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
716               void *, ptr)
717 #else
718 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
719               void *, ptr, long, fifth)
720 #endif
721 #endif
722 #ifdef __NR_msgsnd
723 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
724               int, flags)
725 #endif
726 #ifdef __NR_msgrcv
727 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
728               long, msgtype, int, flags)
729 #endif
730 #ifdef __NR_semtimedop
731 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
732               unsigned, nsops, const struct timespec *, timeout)
733 #endif
734 #if defined(TARGET_NR_mq_timedsend) || \
735     defined(TARGET_NR_mq_timedsend_time64)
736 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
737               size_t, len, unsigned, prio, const struct timespec *, timeout)
738 #endif
739 #if defined(TARGET_NR_mq_timedreceive) || \
740     defined(TARGET_NR_mq_timedreceive_time64)
741 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
742               size_t, len, unsigned *, prio, const struct timespec *, timeout)
743 #endif
744 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
745 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
746               int, outfd, loff_t *, poutoff, size_t, length,
747               unsigned int, flags)
748 #endif
749 
750 /* We do ioctl like this rather than via safe_syscall3 to preserve the
751  * "third argument might be integer or pointer or not present" behaviour of
752  * the libc function.
753  */
754 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
755 /* Similarly for fcntl. Note that callers must always:
756  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
757  *  use the flock64 struct rather than unsuffixed flock
758  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
759  */
760 #ifdef __NR_fcntl64
761 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
762 #else
763 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
764 #endif
765 
766 static inline int host_to_target_sock_type(int host_type)
767 {
768     int target_type;
769 
770     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
771     case SOCK_DGRAM:
772         target_type = TARGET_SOCK_DGRAM;
773         break;
774     case SOCK_STREAM:
775         target_type = TARGET_SOCK_STREAM;
776         break;
777     default:
778         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
779         break;
780     }
781 
782 #if defined(SOCK_CLOEXEC)
783     if (host_type & SOCK_CLOEXEC) {
784         target_type |= TARGET_SOCK_CLOEXEC;
785     }
786 #endif
787 
788 #if defined(SOCK_NONBLOCK)
789     if (host_type & SOCK_NONBLOCK) {
790         target_type |= TARGET_SOCK_NONBLOCK;
791     }
792 #endif
793 
794     return target_type;
795 }
796 
797 static abi_ulong target_brk;
798 static abi_ulong target_original_brk;
799 static abi_ulong brk_page;
800 
801 void target_set_brk(abi_ulong new_brk)
802 {
803     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
804     brk_page = HOST_PAGE_ALIGN(target_brk);
805 }
806 
807 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
808 #define DEBUGF_BRK(message, args...)
809 
810 /* do_brk() must return target values and target errnos. */
811 abi_long do_brk(abi_ulong new_brk)
812 {
813     abi_long mapped_addr;
814     abi_ulong new_alloc_size;
815 
816     /* brk pointers are always untagged */
817 
818     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
819 
820     if (!new_brk) {
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
822         return target_brk;
823     }
824     if (new_brk < target_original_brk) {
825         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
826                    target_brk);
827         return target_brk;
828     }
829 
830     /* If the new brk is less than the highest page reserved to the
831      * target heap allocation, set it and we're almost done...  */
832     if (new_brk <= brk_page) {
833         /* Heap contents are initialized to zero, as for anonymous
834          * mapped pages.  */
835         if (new_brk > target_brk) {
836             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
837         }
838 	target_brk = new_brk;
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
840 	return target_brk;
841     }
842 
843     /* We need to allocate more memory after the brk... Note that
844      * we don't use MAP_FIXED because that will map over the top of
845      * any existing mapping (like the one with the host libc or qemu
846      * itself); instead we treat "mapped but at wrong address" as
847      * a failure and unmap again.
848      */
849     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
850     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
851                                         PROT_READ|PROT_WRITE,
852                                         MAP_ANON|MAP_PRIVATE, 0, 0));
853 
854     if (mapped_addr == brk_page) {
855         /* Heap contents are initialized to zero, as for anonymous
856          * mapped pages.  Technically the new pages are already
857          * initialized to zero since they *are* anonymous mapped
858          * pages, however we have to take care with the contents that
859          * come from the remaining part of the previous page: it may
860          * contains garbage data due to a previous heap usage (grown
861          * then shrunken).  */
862         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
863 
864         target_brk = new_brk;
865         brk_page = HOST_PAGE_ALIGN(target_brk);
866         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
867             target_brk);
868         return target_brk;
869     } else if (mapped_addr != -1) {
870         /* Mapped but at wrong address, meaning there wasn't actually
871          * enough space for this brk.
872          */
873         target_munmap(mapped_addr, new_alloc_size);
874         mapped_addr = -1;
875         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
876     }
877     else {
878         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
879     }
880 
881 #if defined(TARGET_ALPHA)
882     /* We (partially) emulate OSF/1 on Alpha, which requires we
883        return a proper errno, not an unchanged brk value.  */
884     return -TARGET_ENOMEM;
885 #endif
886     /* For everything else, return the previous break. */
887     return target_brk;
888 }
889 
890 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
891     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
892 static inline abi_long copy_from_user_fdset(fd_set *fds,
893                                             abi_ulong target_fds_addr,
894                                             int n)
895 {
896     int i, nw, j, k;
897     abi_ulong b, *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_READ,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  1)))
904         return -TARGET_EFAULT;
905 
906     FD_ZERO(fds);
907     k = 0;
908     for (i = 0; i < nw; i++) {
909         /* grab the abi_ulong */
910         __get_user(b, &target_fds[i]);
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             /* check the bit inside the abi_ulong */
913             if ((b >> j) & 1)
914                 FD_SET(k, fds);
915             k++;
916         }
917     }
918 
919     unlock_user(target_fds, target_fds_addr, 0);
920 
921     return 0;
922 }
923 
924 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
925                                                  abi_ulong target_fds_addr,
926                                                  int n)
927 {
928     if (target_fds_addr) {
929         if (copy_from_user_fdset(fds, target_fds_addr, n))
930             return -TARGET_EFAULT;
931         *fds_ptr = fds;
932     } else {
933         *fds_ptr = NULL;
934     }
935     return 0;
936 }
937 
938 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
939                                           const fd_set *fds,
940                                           int n)
941 {
942     int i, nw, j, k;
943     abi_long v;
944     abi_ulong *target_fds;
945 
946     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
947     if (!(target_fds = lock_user(VERIFY_WRITE,
948                                  target_fds_addr,
949                                  sizeof(abi_ulong) * nw,
950                                  0)))
951         return -TARGET_EFAULT;
952 
953     k = 0;
954     for (i = 0; i < nw; i++) {
955         v = 0;
956         for (j = 0; j < TARGET_ABI_BITS; j++) {
957             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
958             k++;
959         }
960         __put_user(v, &target_fds[i]);
961     }
962 
963     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
964 
965     return 0;
966 }
967 #endif
968 
969 #if defined(__alpha__)
970 #define HOST_HZ 1024
971 #else
972 #define HOST_HZ 100
973 #endif
974 
975 static inline abi_long host_to_target_clock_t(long ticks)
976 {
977 #if HOST_HZ == TARGET_HZ
978     return ticks;
979 #else
980     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
981 #endif
982 }
983 
984 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
985                                              const struct rusage *rusage)
986 {
987     struct target_rusage *target_rusage;
988 
989     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
990         return -TARGET_EFAULT;
991     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
992     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
993     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
994     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
995     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
996     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
997     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
998     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
999     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1000     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1001     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1002     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1003     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1004     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1005     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1006     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1007     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1008     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1009     unlock_user_struct(target_rusage, target_addr, 1);
1010 
1011     return 0;
1012 }
1013 
1014 #ifdef TARGET_NR_setrlimit
1015 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1016 {
1017     abi_ulong target_rlim_swap;
1018     rlim_t result;
1019 
1020     target_rlim_swap = tswapal(target_rlim);
1021     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1022         return RLIM_INFINITY;
1023 
1024     result = target_rlim_swap;
1025     if (target_rlim_swap != (rlim_t)result)
1026         return RLIM_INFINITY;
1027 
1028     return result;
1029 }
1030 #endif
1031 
1032 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1033 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1034 {
1035     abi_ulong target_rlim_swap;
1036     abi_ulong result;
1037 
1038     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1039         target_rlim_swap = TARGET_RLIM_INFINITY;
1040     else
1041         target_rlim_swap = rlim;
1042     result = tswapal(target_rlim_swap);
1043 
1044     return result;
1045 }
1046 #endif
1047 
1048 static inline int target_to_host_resource(int code)
1049 {
1050     switch (code) {
1051     case TARGET_RLIMIT_AS:
1052         return RLIMIT_AS;
1053     case TARGET_RLIMIT_CORE:
1054         return RLIMIT_CORE;
1055     case TARGET_RLIMIT_CPU:
1056         return RLIMIT_CPU;
1057     case TARGET_RLIMIT_DATA:
1058         return RLIMIT_DATA;
1059     case TARGET_RLIMIT_FSIZE:
1060         return RLIMIT_FSIZE;
1061     case TARGET_RLIMIT_LOCKS:
1062         return RLIMIT_LOCKS;
1063     case TARGET_RLIMIT_MEMLOCK:
1064         return RLIMIT_MEMLOCK;
1065     case TARGET_RLIMIT_MSGQUEUE:
1066         return RLIMIT_MSGQUEUE;
1067     case TARGET_RLIMIT_NICE:
1068         return RLIMIT_NICE;
1069     case TARGET_RLIMIT_NOFILE:
1070         return RLIMIT_NOFILE;
1071     case TARGET_RLIMIT_NPROC:
1072         return RLIMIT_NPROC;
1073     case TARGET_RLIMIT_RSS:
1074         return RLIMIT_RSS;
1075     case TARGET_RLIMIT_RTPRIO:
1076         return RLIMIT_RTPRIO;
1077 #ifdef RLIMIT_RTTIME
1078     case TARGET_RLIMIT_RTTIME:
1079         return RLIMIT_RTTIME;
1080 #endif
1081     case TARGET_RLIMIT_SIGPENDING:
1082         return RLIMIT_SIGPENDING;
1083     case TARGET_RLIMIT_STACK:
1084         return RLIMIT_STACK;
1085     default:
1086         return code;
1087     }
1088 }
1089 
1090 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1091                                               abi_ulong target_tv_addr)
1092 {
1093     struct target_timeval *target_tv;
1094 
1095     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1096         return -TARGET_EFAULT;
1097     }
1098 
1099     __get_user(tv->tv_sec, &target_tv->tv_sec);
1100     __get_user(tv->tv_usec, &target_tv->tv_usec);
1101 
1102     unlock_user_struct(target_tv, target_tv_addr, 0);
1103 
1104     return 0;
1105 }
1106 
1107 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1108                                             const struct timeval *tv)
1109 {
1110     struct target_timeval *target_tv;
1111 
1112     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1113         return -TARGET_EFAULT;
1114     }
1115 
1116     __put_user(tv->tv_sec, &target_tv->tv_sec);
1117     __put_user(tv->tv_usec, &target_tv->tv_usec);
1118 
1119     unlock_user_struct(target_tv, target_tv_addr, 1);
1120 
1121     return 0;
1122 }
1123 
1124 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1125 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1126                                                 abi_ulong target_tv_addr)
1127 {
1128     struct target__kernel_sock_timeval *target_tv;
1129 
1130     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1131         return -TARGET_EFAULT;
1132     }
1133 
1134     __get_user(tv->tv_sec, &target_tv->tv_sec);
1135     __get_user(tv->tv_usec, &target_tv->tv_usec);
1136 
1137     unlock_user_struct(target_tv, target_tv_addr, 0);
1138 
1139     return 0;
1140 }
1141 #endif
1142 
1143 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1144                                               const struct timeval *tv)
1145 {
1146     struct target__kernel_sock_timeval *target_tv;
1147 
1148     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1149         return -TARGET_EFAULT;
1150     }
1151 
1152     __put_user(tv->tv_sec, &target_tv->tv_sec);
1153     __put_user(tv->tv_usec, &target_tv->tv_usec);
1154 
1155     unlock_user_struct(target_tv, target_tv_addr, 1);
1156 
1157     return 0;
1158 }
1159 
1160 #if defined(TARGET_NR_futex) || \
1161     defined(TARGET_NR_rt_sigtimedwait) || \
1162     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1163     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1164     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1165     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1166     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1167     defined(TARGET_NR_timer_settime) || \
1168     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1169 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1170                                                abi_ulong target_addr)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 0);
1180     return 0;
1181 }
1182 #endif
1183 
1184 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1185     defined(TARGET_NR_timer_settime64) || \
1186     defined(TARGET_NR_mq_timedsend_time64) || \
1187     defined(TARGET_NR_mq_timedreceive_time64) || \
1188     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1189     defined(TARGET_NR_clock_nanosleep_time64) || \
1190     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1191     defined(TARGET_NR_utimensat) || \
1192     defined(TARGET_NR_utimensat_time64) || \
1193     defined(TARGET_NR_semtimedop_time64) || \
1194     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1195 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1196                                                  abi_ulong target_addr)
1197 {
1198     struct target__kernel_timespec *target_ts;
1199 
1200     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1204     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1205     /* in 32bit mode, this drops the padding */
1206     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1207     unlock_user_struct(target_ts, target_addr, 0);
1208     return 0;
1209 }
1210 #endif
1211 
1212 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1213                                                struct timespec *host_ts)
1214 {
1215     struct target_timespec *target_ts;
1216 
1217     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1218         return -TARGET_EFAULT;
1219     }
1220     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1221     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1222     unlock_user_struct(target_ts, target_addr, 1);
1223     return 0;
1224 }
1225 
1226 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1227                                                  struct timespec *host_ts)
1228 {
1229     struct target__kernel_timespec *target_ts;
1230 
1231     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1232         return -TARGET_EFAULT;
1233     }
1234     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1235     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1236     unlock_user_struct(target_ts, target_addr, 1);
1237     return 0;
1238 }
1239 
1240 #if defined(TARGET_NR_gettimeofday)
1241 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1242                                              struct timezone *tz)
1243 {
1244     struct target_timezone *target_tz;
1245 
1246     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1247         return -TARGET_EFAULT;
1248     }
1249 
1250     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1251     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1252 
1253     unlock_user_struct(target_tz, target_tz_addr, 1);
1254 
1255     return 0;
1256 }
1257 #endif
1258 
1259 #if defined(TARGET_NR_settimeofday)
1260 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1261                                                abi_ulong target_tz_addr)
1262 {
1263     struct target_timezone *target_tz;
1264 
1265     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1266         return -TARGET_EFAULT;
1267     }
1268 
1269     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1270     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1271 
1272     unlock_user_struct(target_tz, target_tz_addr, 0);
1273 
1274     return 0;
1275 }
1276 #endif
1277 
1278 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1279 #include <mqueue.h>
1280 
1281 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1282                                               abi_ulong target_mq_attr_addr)
1283 {
1284     struct target_mq_attr *target_mq_attr;
1285 
1286     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1287                           target_mq_attr_addr, 1))
1288         return -TARGET_EFAULT;
1289 
1290     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1291     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1292     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1293     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1294 
1295     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1296 
1297     return 0;
1298 }
1299 
1300 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1301                                             const struct mq_attr *attr)
1302 {
1303     struct target_mq_attr *target_mq_attr;
1304 
1305     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1306                           target_mq_attr_addr, 0))
1307         return -TARGET_EFAULT;
1308 
1309     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1310     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1311     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1312     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1313 
1314     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1315 
1316     return 0;
1317 }
1318 #endif
1319 
1320 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1321 /* do_select() must return target values and target errnos. */
1322 static abi_long do_select(int n,
1323                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1324                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1325 {
1326     fd_set rfds, wfds, efds;
1327     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1328     struct timeval tv;
1329     struct timespec ts, *ts_ptr;
1330     abi_long ret;
1331 
1332     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1333     if (ret) {
1334         return ret;
1335     }
1336     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1337     if (ret) {
1338         return ret;
1339     }
1340     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1341     if (ret) {
1342         return ret;
1343     }
1344 
1345     if (target_tv_addr) {
1346         if (copy_from_user_timeval(&tv, target_tv_addr))
1347             return -TARGET_EFAULT;
1348         ts.tv_sec = tv.tv_sec;
1349         ts.tv_nsec = tv.tv_usec * 1000;
1350         ts_ptr = &ts;
1351     } else {
1352         ts_ptr = NULL;
1353     }
1354 
1355     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1356                                   ts_ptr, NULL));
1357 
1358     if (!is_error(ret)) {
1359         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1360             return -TARGET_EFAULT;
1361         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1362             return -TARGET_EFAULT;
1363         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1364             return -TARGET_EFAULT;
1365 
1366         if (target_tv_addr) {
1367             tv.tv_sec = ts.tv_sec;
1368             tv.tv_usec = ts.tv_nsec / 1000;
1369             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1370                 return -TARGET_EFAULT;
1371             }
1372         }
1373     }
1374 
1375     return ret;
1376 }
1377 
1378 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1379 static abi_long do_old_select(abi_ulong arg1)
1380 {
1381     struct target_sel_arg_struct *sel;
1382     abi_ulong inp, outp, exp, tvp;
1383     long nsel;
1384 
1385     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1386         return -TARGET_EFAULT;
1387     }
1388 
1389     nsel = tswapal(sel->n);
1390     inp = tswapal(sel->inp);
1391     outp = tswapal(sel->outp);
1392     exp = tswapal(sel->exp);
1393     tvp = tswapal(sel->tvp);
1394 
1395     unlock_user_struct(sel, arg1, 0);
1396 
1397     return do_select(nsel, inp, outp, exp, tvp);
1398 }
1399 #endif
1400 #endif
1401 
1402 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1403 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1404                             abi_long arg4, abi_long arg5, abi_long arg6,
1405                             bool time64)
1406 {
1407     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1408     fd_set rfds, wfds, efds;
1409     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1410     struct timespec ts, *ts_ptr;
1411     abi_long ret;
1412 
1413     /*
1414      * The 6th arg is actually two args smashed together,
1415      * so we cannot use the C library.
1416      */
1417     struct {
1418         sigset_t *set;
1419         size_t size;
1420     } sig, *sig_ptr;
1421 
1422     abi_ulong arg_sigset, arg_sigsize, *arg7;
1423 
1424     n = arg1;
1425     rfd_addr = arg2;
1426     wfd_addr = arg3;
1427     efd_addr = arg4;
1428     ts_addr = arg5;
1429 
1430     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1431     if (ret) {
1432         return ret;
1433     }
1434     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1435     if (ret) {
1436         return ret;
1437     }
1438     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1439     if (ret) {
1440         return ret;
1441     }
1442 
1443     /*
1444      * This takes a timespec, and not a timeval, so we cannot
1445      * use the do_select() helper ...
1446      */
1447     if (ts_addr) {
1448         if (time64) {
1449             if (target_to_host_timespec64(&ts, ts_addr)) {
1450                 return -TARGET_EFAULT;
1451             }
1452         } else {
1453             if (target_to_host_timespec(&ts, ts_addr)) {
1454                 return -TARGET_EFAULT;
1455             }
1456         }
1457             ts_ptr = &ts;
1458     } else {
1459         ts_ptr = NULL;
1460     }
1461 
1462     /* Extract the two packed args for the sigset */
1463     sig_ptr = NULL;
1464     if (arg6) {
1465         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1466         if (!arg7) {
1467             return -TARGET_EFAULT;
1468         }
1469         arg_sigset = tswapal(arg7[0]);
1470         arg_sigsize = tswapal(arg7[1]);
1471         unlock_user(arg7, arg6, 0);
1472 
1473         if (arg_sigset) {
1474             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1475             if (ret != 0) {
1476                 return ret;
1477             }
1478             sig_ptr = &sig;
1479             sig.size = SIGSET_T_SIZE;
1480         }
1481     }
1482 
1483     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1484                                   ts_ptr, sig_ptr));
1485 
1486     if (sig_ptr) {
1487         finish_sigsuspend_mask(ret);
1488     }
1489 
1490     if (!is_error(ret)) {
1491         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1492             return -TARGET_EFAULT;
1493         }
1494         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1495             return -TARGET_EFAULT;
1496         }
1497         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1498             return -TARGET_EFAULT;
1499         }
1500         if (time64) {
1501             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1502                 return -TARGET_EFAULT;
1503             }
1504         } else {
1505             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1506                 return -TARGET_EFAULT;
1507             }
1508         }
1509     }
1510     return ret;
1511 }
1512 #endif
1513 
1514 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1515     defined(TARGET_NR_ppoll_time64)
1516 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1517                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1518 {
1519     struct target_pollfd *target_pfd;
1520     unsigned int nfds = arg2;
1521     struct pollfd *pfd;
1522     unsigned int i;
1523     abi_long ret;
1524 
1525     pfd = NULL;
1526     target_pfd = NULL;
1527     if (nfds) {
1528         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1529             return -TARGET_EINVAL;
1530         }
1531         target_pfd = lock_user(VERIFY_WRITE, arg1,
1532                                sizeof(struct target_pollfd) * nfds, 1);
1533         if (!target_pfd) {
1534             return -TARGET_EFAULT;
1535         }
1536 
1537         pfd = alloca(sizeof(struct pollfd) * nfds);
1538         for (i = 0; i < nfds; i++) {
1539             pfd[i].fd = tswap32(target_pfd[i].fd);
1540             pfd[i].events = tswap16(target_pfd[i].events);
1541         }
1542     }
1543     if (ppoll) {
1544         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1545         sigset_t *set = NULL;
1546 
1547         if (arg3) {
1548             if (time64) {
1549                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1550                     unlock_user(target_pfd, arg1, 0);
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (target_to_host_timespec(timeout_ts, arg3)) {
1555                     unlock_user(target_pfd, arg1, 0);
1556                     return -TARGET_EFAULT;
1557                 }
1558             }
1559         } else {
1560             timeout_ts = NULL;
1561         }
1562 
1563         if (arg4) {
1564             ret = process_sigsuspend_mask(&set, arg4, arg5);
1565             if (ret != 0) {
1566                 unlock_user(target_pfd, arg1, 0);
1567                 return ret;
1568             }
1569         }
1570 
1571         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1572                                    set, SIGSET_T_SIZE));
1573 
1574         if (set) {
1575             finish_sigsuspend_mask(ret);
1576         }
1577         if (!is_error(ret) && arg3) {
1578             if (time64) {
1579                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1580                     return -TARGET_EFAULT;
1581                 }
1582             } else {
1583                 if (host_to_target_timespec(arg3, timeout_ts)) {
1584                     return -TARGET_EFAULT;
1585                 }
1586             }
1587         }
1588     } else {
1589           struct timespec ts, *pts;
1590 
1591           if (arg3 >= 0) {
1592               /* Convert ms to secs, ns */
1593               ts.tv_sec = arg3 / 1000;
1594               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1595               pts = &ts;
1596           } else {
1597               /* -ve poll() timeout means "infinite" */
1598               pts = NULL;
1599           }
1600           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1601     }
1602 
1603     if (!is_error(ret)) {
1604         for (i = 0; i < nfds; i++) {
1605             target_pfd[i].revents = tswap16(pfd[i].revents);
1606         }
1607     }
1608     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1609     return ret;
1610 }
1611 #endif
1612 
1613 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1614                         int flags, int is_pipe2)
1615 {
1616     int host_pipe[2];
1617     abi_long ret;
1618     ret = pipe2(host_pipe, flags);
1619 
1620     if (is_error(ret))
1621         return get_errno(ret);
1622 
1623     /* Several targets have special calling conventions for the original
1624        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1625     if (!is_pipe2) {
1626 #if defined(TARGET_ALPHA)
1627         cpu_env->ir[IR_A4] = host_pipe[1];
1628         return host_pipe[0];
1629 #elif defined(TARGET_MIPS)
1630         cpu_env->active_tc.gpr[3] = host_pipe[1];
1631         return host_pipe[0];
1632 #elif defined(TARGET_SH4)
1633         cpu_env->gregs[1] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_SPARC)
1636         cpu_env->regwptr[1] = host_pipe[1];
1637         return host_pipe[0];
1638 #endif
1639     }
1640 
1641     if (put_user_s32(host_pipe[0], pipedes)
1642         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1643         return -TARGET_EFAULT;
1644     return get_errno(ret);
1645 }
1646 
1647 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1648                                               abi_ulong target_addr,
1649                                               socklen_t len)
1650 {
1651     struct target_ip_mreqn *target_smreqn;
1652 
1653     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1654     if (!target_smreqn)
1655         return -TARGET_EFAULT;
1656     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1657     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1658     if (len == sizeof(struct target_ip_mreqn))
1659         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1660     unlock_user(target_smreqn, target_addr, 0);
1661 
1662     return 0;
1663 }
1664 
1665 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1666                                                abi_ulong target_addr,
1667                                                socklen_t len)
1668 {
1669     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1670     sa_family_t sa_family;
1671     struct target_sockaddr *target_saddr;
1672 
1673     if (fd_trans_target_to_host_addr(fd)) {
1674         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1675     }
1676 
1677     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1678     if (!target_saddr)
1679         return -TARGET_EFAULT;
1680 
1681     sa_family = tswap16(target_saddr->sa_family);
1682 
1683     /* Oops. The caller might send a incomplete sun_path; sun_path
1684      * must be terminated by \0 (see the manual page), but
1685      * unfortunately it is quite common to specify sockaddr_un
1686      * length as "strlen(x->sun_path)" while it should be
1687      * "strlen(...) + 1". We'll fix that here if needed.
1688      * Linux kernel has a similar feature.
1689      */
1690 
1691     if (sa_family == AF_UNIX) {
1692         if (len < unix_maxlen && len > 0) {
1693             char *cp = (char*)target_saddr;
1694 
1695             if ( cp[len-1] && !cp[len] )
1696                 len++;
1697         }
1698         if (len > unix_maxlen)
1699             len = unix_maxlen;
1700     }
1701 
1702     memcpy(addr, target_saddr, len);
1703     addr->sa_family = sa_family;
1704     if (sa_family == AF_NETLINK) {
1705         struct sockaddr_nl *nladdr;
1706 
1707         nladdr = (struct sockaddr_nl *)addr;
1708         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1709         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1710     } else if (sa_family == AF_PACKET) {
1711 	struct target_sockaddr_ll *lladdr;
1712 
1713 	lladdr = (struct target_sockaddr_ll *)addr;
1714 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1715 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1716     }
1717     unlock_user(target_saddr, target_addr, 0);
1718 
1719     return 0;
1720 }
1721 
1722 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1723                                                struct sockaddr *addr,
1724                                                socklen_t len)
1725 {
1726     struct target_sockaddr *target_saddr;
1727 
1728     if (len == 0) {
1729         return 0;
1730     }
1731     assert(addr);
1732 
1733     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1734     if (!target_saddr)
1735         return -TARGET_EFAULT;
1736     memcpy(target_saddr, addr, len);
1737     if (len >= offsetof(struct target_sockaddr, sa_family) +
1738         sizeof(target_saddr->sa_family)) {
1739         target_saddr->sa_family = tswap16(addr->sa_family);
1740     }
1741     if (addr->sa_family == AF_NETLINK &&
1742         len >= sizeof(struct target_sockaddr_nl)) {
1743         struct target_sockaddr_nl *target_nl =
1744                (struct target_sockaddr_nl *)target_saddr;
1745         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1746         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1747     } else if (addr->sa_family == AF_PACKET) {
1748         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1749         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1750         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1751     } else if (addr->sa_family == AF_INET6 &&
1752                len >= sizeof(struct target_sockaddr_in6)) {
1753         struct target_sockaddr_in6 *target_in6 =
1754                (struct target_sockaddr_in6 *)target_saddr;
1755         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1756     }
1757     unlock_user(target_saddr, target_addr, len);
1758 
1759     return 0;
1760 }
1761 
1762 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1763                                            struct target_msghdr *target_msgh)
1764 {
1765     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1766     abi_long msg_controllen;
1767     abi_ulong target_cmsg_addr;
1768     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1769     socklen_t space = 0;
1770 
1771     msg_controllen = tswapal(target_msgh->msg_controllen);
1772     if (msg_controllen < sizeof (struct target_cmsghdr))
1773         goto the_end;
1774     target_cmsg_addr = tswapal(target_msgh->msg_control);
1775     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1776     target_cmsg_start = target_cmsg;
1777     if (!target_cmsg)
1778         return -TARGET_EFAULT;
1779 
1780     while (cmsg && target_cmsg) {
1781         void *data = CMSG_DATA(cmsg);
1782         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1783 
1784         int len = tswapal(target_cmsg->cmsg_len)
1785             - sizeof(struct target_cmsghdr);
1786 
1787         space += CMSG_SPACE(len);
1788         if (space > msgh->msg_controllen) {
1789             space -= CMSG_SPACE(len);
1790             /* This is a QEMU bug, since we allocated the payload
1791              * area ourselves (unlike overflow in host-to-target
1792              * conversion, which is just the guest giving us a buffer
1793              * that's too small). It can't happen for the payload types
1794              * we currently support; if it becomes an issue in future
1795              * we would need to improve our allocation strategy to
1796              * something more intelligent than "twice the size of the
1797              * target buffer we're reading from".
1798              */
1799             qemu_log_mask(LOG_UNIMP,
1800                           ("Unsupported ancillary data %d/%d: "
1801                            "unhandled msg size\n"),
1802                           tswap32(target_cmsg->cmsg_level),
1803                           tswap32(target_cmsg->cmsg_type));
1804             break;
1805         }
1806 
1807         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1808             cmsg->cmsg_level = SOL_SOCKET;
1809         } else {
1810             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1811         }
1812         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1813         cmsg->cmsg_len = CMSG_LEN(len);
1814 
1815         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1816             int *fd = (int *)data;
1817             int *target_fd = (int *)target_data;
1818             int i, numfds = len / sizeof(int);
1819 
1820             for (i = 0; i < numfds; i++) {
1821                 __get_user(fd[i], target_fd + i);
1822             }
1823         } else if (cmsg->cmsg_level == SOL_SOCKET
1824                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1825             struct ucred *cred = (struct ucred *)data;
1826             struct target_ucred *target_cred =
1827                 (struct target_ucred *)target_data;
1828 
1829             __get_user(cred->pid, &target_cred->pid);
1830             __get_user(cred->uid, &target_cred->uid);
1831             __get_user(cred->gid, &target_cred->gid);
1832         } else {
1833             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1834                           cmsg->cmsg_level, cmsg->cmsg_type);
1835             memcpy(data, target_data, len);
1836         }
1837 
1838         cmsg = CMSG_NXTHDR(msgh, cmsg);
1839         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1840                                          target_cmsg_start);
1841     }
1842     unlock_user(target_cmsg, target_cmsg_addr, 0);
1843  the_end:
1844     msgh->msg_controllen = space;
1845     return 0;
1846 }
1847 
1848 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1849                                            struct msghdr *msgh)
1850 {
1851     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1852     abi_long msg_controllen;
1853     abi_ulong target_cmsg_addr;
1854     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1855     socklen_t space = 0;
1856 
1857     msg_controllen = tswapal(target_msgh->msg_controllen);
1858     if (msg_controllen < sizeof (struct target_cmsghdr))
1859         goto the_end;
1860     target_cmsg_addr = tswapal(target_msgh->msg_control);
1861     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1862     target_cmsg_start = target_cmsg;
1863     if (!target_cmsg)
1864         return -TARGET_EFAULT;
1865 
1866     while (cmsg && target_cmsg) {
1867         void *data = CMSG_DATA(cmsg);
1868         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1869 
1870         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1871         int tgt_len, tgt_space;
1872 
1873         /* We never copy a half-header but may copy half-data;
1874          * this is Linux's behaviour in put_cmsg(). Note that
1875          * truncation here is a guest problem (which we report
1876          * to the guest via the CTRUNC bit), unlike truncation
1877          * in target_to_host_cmsg, which is a QEMU bug.
1878          */
1879         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1880             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1881             break;
1882         }
1883 
1884         if (cmsg->cmsg_level == SOL_SOCKET) {
1885             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1886         } else {
1887             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1888         }
1889         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1890 
1891         /* Payload types which need a different size of payload on
1892          * the target must adjust tgt_len here.
1893          */
1894         tgt_len = len;
1895         switch (cmsg->cmsg_level) {
1896         case SOL_SOCKET:
1897             switch (cmsg->cmsg_type) {
1898             case SO_TIMESTAMP:
1899                 tgt_len = sizeof(struct target_timeval);
1900                 break;
1901             default:
1902                 break;
1903             }
1904             break;
1905         default:
1906             break;
1907         }
1908 
1909         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1910             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1911             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1912         }
1913 
1914         /* We must now copy-and-convert len bytes of payload
1915          * into tgt_len bytes of destination space. Bear in mind
1916          * that in both source and destination we may be dealing
1917          * with a truncated value!
1918          */
1919         switch (cmsg->cmsg_level) {
1920         case SOL_SOCKET:
1921             switch (cmsg->cmsg_type) {
1922             case SCM_RIGHTS:
1923             {
1924                 int *fd = (int *)data;
1925                 int *target_fd = (int *)target_data;
1926                 int i, numfds = tgt_len / sizeof(int);
1927 
1928                 for (i = 0; i < numfds; i++) {
1929                     __put_user(fd[i], target_fd + i);
1930                 }
1931                 break;
1932             }
1933             case SO_TIMESTAMP:
1934             {
1935                 struct timeval *tv = (struct timeval *)data;
1936                 struct target_timeval *target_tv =
1937                     (struct target_timeval *)target_data;
1938 
1939                 if (len != sizeof(struct timeval) ||
1940                     tgt_len != sizeof(struct target_timeval)) {
1941                     goto unimplemented;
1942                 }
1943 
1944                 /* copy struct timeval to target */
1945                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1946                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1947                 break;
1948             }
1949             case SCM_CREDENTIALS:
1950             {
1951                 struct ucred *cred = (struct ucred *)data;
1952                 struct target_ucred *target_cred =
1953                     (struct target_ucred *)target_data;
1954 
1955                 __put_user(cred->pid, &target_cred->pid);
1956                 __put_user(cred->uid, &target_cred->uid);
1957                 __put_user(cred->gid, &target_cred->gid);
1958                 break;
1959             }
1960             default:
1961                 goto unimplemented;
1962             }
1963             break;
1964 
1965         case SOL_IP:
1966             switch (cmsg->cmsg_type) {
1967             case IP_TTL:
1968             {
1969                 uint32_t *v = (uint32_t *)data;
1970                 uint32_t *t_int = (uint32_t *)target_data;
1971 
1972                 if (len != sizeof(uint32_t) ||
1973                     tgt_len != sizeof(uint32_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(*v, t_int);
1977                 break;
1978             }
1979             case IP_RECVERR:
1980             {
1981                 struct errhdr_t {
1982                    struct sock_extended_err ee;
1983                    struct sockaddr_in offender;
1984                 };
1985                 struct errhdr_t *errh = (struct errhdr_t *)data;
1986                 struct errhdr_t *target_errh =
1987                     (struct errhdr_t *)target_data;
1988 
1989                 if (len != sizeof(struct errhdr_t) ||
1990                     tgt_len != sizeof(struct errhdr_t)) {
1991                     goto unimplemented;
1992                 }
1993                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1994                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1995                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1996                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1997                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1998                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1999                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2000                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2001                     (void *) &errh->offender, sizeof(errh->offender));
2002                 break;
2003             }
2004             default:
2005                 goto unimplemented;
2006             }
2007             break;
2008 
2009         case SOL_IPV6:
2010             switch (cmsg->cmsg_type) {
2011             case IPV6_HOPLIMIT:
2012             {
2013                 uint32_t *v = (uint32_t *)data;
2014                 uint32_t *t_int = (uint32_t *)target_data;
2015 
2016                 if (len != sizeof(uint32_t) ||
2017                     tgt_len != sizeof(uint32_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(*v, t_int);
2021                 break;
2022             }
2023             case IPV6_RECVERR:
2024             {
2025                 struct errhdr6_t {
2026                    struct sock_extended_err ee;
2027                    struct sockaddr_in6 offender;
2028                 };
2029                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2030                 struct errhdr6_t *target_errh =
2031                     (struct errhdr6_t *)target_data;
2032 
2033                 if (len != sizeof(struct errhdr6_t) ||
2034                     tgt_len != sizeof(struct errhdr6_t)) {
2035                     goto unimplemented;
2036                 }
2037                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2038                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2039                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2040                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2041                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2042                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2043                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2044                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2045                     (void *) &errh->offender, sizeof(errh->offender));
2046                 break;
2047             }
2048             default:
2049                 goto unimplemented;
2050             }
2051             break;
2052 
2053         default:
2054         unimplemented:
2055             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2056                           cmsg->cmsg_level, cmsg->cmsg_type);
2057             memcpy(target_data, data, MIN(len, tgt_len));
2058             if (tgt_len > len) {
2059                 memset(target_data + len, 0, tgt_len - len);
2060             }
2061         }
2062 
2063         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2064         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2065         if (msg_controllen < tgt_space) {
2066             tgt_space = msg_controllen;
2067         }
2068         msg_controllen -= tgt_space;
2069         space += tgt_space;
2070         cmsg = CMSG_NXTHDR(msgh, cmsg);
2071         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2072                                          target_cmsg_start);
2073     }
2074     unlock_user(target_cmsg, target_cmsg_addr, space);
2075  the_end:
2076     target_msgh->msg_controllen = tswapal(space);
2077     return 0;
2078 }
2079 
2080 /* do_setsockopt() Must return target values and target errnos. */
2081 static abi_long do_setsockopt(int sockfd, int level, int optname,
2082                               abi_ulong optval_addr, socklen_t optlen)
2083 {
2084     abi_long ret;
2085     int val;
2086     struct ip_mreqn *ip_mreq;
2087     struct ip_mreq_source *ip_mreq_source;
2088 
2089     switch(level) {
2090     case SOL_TCP:
2091     case SOL_UDP:
2092         /* TCP and UDP options all take an 'int' value.  */
2093         if (optlen < sizeof(uint32_t))
2094             return -TARGET_EINVAL;
2095 
2096         if (get_user_u32(val, optval_addr))
2097             return -TARGET_EFAULT;
2098         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2099         break;
2100     case SOL_IP:
2101         switch(optname) {
2102         case IP_TOS:
2103         case IP_TTL:
2104         case IP_HDRINCL:
2105         case IP_ROUTER_ALERT:
2106         case IP_RECVOPTS:
2107         case IP_RETOPTS:
2108         case IP_PKTINFO:
2109         case IP_MTU_DISCOVER:
2110         case IP_RECVERR:
2111         case IP_RECVTTL:
2112         case IP_RECVTOS:
2113 #ifdef IP_FREEBIND
2114         case IP_FREEBIND:
2115 #endif
2116         case IP_MULTICAST_TTL:
2117         case IP_MULTICAST_LOOP:
2118             val = 0;
2119             if (optlen >= sizeof(uint32_t)) {
2120                 if (get_user_u32(val, optval_addr))
2121                     return -TARGET_EFAULT;
2122             } else if (optlen >= 1) {
2123                 if (get_user_u8(val, optval_addr))
2124                     return -TARGET_EFAULT;
2125             }
2126             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2127             break;
2128         case IP_ADD_MEMBERSHIP:
2129         case IP_DROP_MEMBERSHIP:
2130             if (optlen < sizeof (struct target_ip_mreq) ||
2131                 optlen > sizeof (struct target_ip_mreqn))
2132                 return -TARGET_EINVAL;
2133 
2134             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2135             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2137             break;
2138 
2139         case IP_BLOCK_SOURCE:
2140         case IP_UNBLOCK_SOURCE:
2141         case IP_ADD_SOURCE_MEMBERSHIP:
2142         case IP_DROP_SOURCE_MEMBERSHIP:
2143             if (optlen != sizeof (struct target_ip_mreq_source))
2144                 return -TARGET_EINVAL;
2145 
2146             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2147             if (!ip_mreq_source) {
2148                 return -TARGET_EFAULT;
2149             }
2150             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2151             unlock_user (ip_mreq_source, optval_addr, 0);
2152             break;
2153 
2154         default:
2155             goto unimplemented;
2156         }
2157         break;
2158     case SOL_IPV6:
2159         switch (optname) {
2160         case IPV6_MTU_DISCOVER:
2161         case IPV6_MTU:
2162         case IPV6_V6ONLY:
2163         case IPV6_RECVPKTINFO:
2164         case IPV6_UNICAST_HOPS:
2165         case IPV6_MULTICAST_HOPS:
2166         case IPV6_MULTICAST_LOOP:
2167         case IPV6_RECVERR:
2168         case IPV6_RECVHOPLIMIT:
2169         case IPV6_2292HOPLIMIT:
2170         case IPV6_CHECKSUM:
2171         case IPV6_ADDRFORM:
2172         case IPV6_2292PKTINFO:
2173         case IPV6_RECVTCLASS:
2174         case IPV6_RECVRTHDR:
2175         case IPV6_2292RTHDR:
2176         case IPV6_RECVHOPOPTS:
2177         case IPV6_2292HOPOPTS:
2178         case IPV6_RECVDSTOPTS:
2179         case IPV6_2292DSTOPTS:
2180         case IPV6_TCLASS:
2181         case IPV6_ADDR_PREFERENCES:
2182 #ifdef IPV6_RECVPATHMTU
2183         case IPV6_RECVPATHMTU:
2184 #endif
2185 #ifdef IPV6_TRANSPARENT
2186         case IPV6_TRANSPARENT:
2187 #endif
2188 #ifdef IPV6_FREEBIND
2189         case IPV6_FREEBIND:
2190 #endif
2191 #ifdef IPV6_RECVORIGDSTADDR
2192         case IPV6_RECVORIGDSTADDR:
2193 #endif
2194             val = 0;
2195             if (optlen < sizeof(uint32_t)) {
2196                 return -TARGET_EINVAL;
2197             }
2198             if (get_user_u32(val, optval_addr)) {
2199                 return -TARGET_EFAULT;
2200             }
2201             ret = get_errno(setsockopt(sockfd, level, optname,
2202                                        &val, sizeof(val)));
2203             break;
2204         case IPV6_PKTINFO:
2205         {
2206             struct in6_pktinfo pki;
2207 
2208             if (optlen < sizeof(pki)) {
2209                 return -TARGET_EINVAL;
2210             }
2211 
2212             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2213                 return -TARGET_EFAULT;
2214             }
2215 
2216             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2217 
2218             ret = get_errno(setsockopt(sockfd, level, optname,
2219                                        &pki, sizeof(pki)));
2220             break;
2221         }
2222         case IPV6_ADD_MEMBERSHIP:
2223         case IPV6_DROP_MEMBERSHIP:
2224         {
2225             struct ipv6_mreq ipv6mreq;
2226 
2227             if (optlen < sizeof(ipv6mreq)) {
2228                 return -TARGET_EINVAL;
2229             }
2230 
2231             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2232                 return -TARGET_EFAULT;
2233             }
2234 
2235             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &ipv6mreq, sizeof(ipv6mreq)));
2239             break;
2240         }
2241         default:
2242             goto unimplemented;
2243         }
2244         break;
2245     case SOL_ICMPV6:
2246         switch (optname) {
2247         case ICMPV6_FILTER:
2248         {
2249             struct icmp6_filter icmp6f;
2250 
2251             if (optlen > sizeof(icmp6f)) {
2252                 optlen = sizeof(icmp6f);
2253             }
2254 
2255             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2256                 return -TARGET_EFAULT;
2257             }
2258 
2259             for (val = 0; val < 8; val++) {
2260                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2261             }
2262 
2263             ret = get_errno(setsockopt(sockfd, level, optname,
2264                                        &icmp6f, optlen));
2265             break;
2266         }
2267         default:
2268             goto unimplemented;
2269         }
2270         break;
2271     case SOL_RAW:
2272         switch (optname) {
2273         case ICMP_FILTER:
2274         case IPV6_CHECKSUM:
2275             /* those take an u32 value */
2276             if (optlen < sizeof(uint32_t)) {
2277                 return -TARGET_EINVAL;
2278             }
2279 
2280             if (get_user_u32(val, optval_addr)) {
2281                 return -TARGET_EFAULT;
2282             }
2283             ret = get_errno(setsockopt(sockfd, level, optname,
2284                                        &val, sizeof(val)));
2285             break;
2286 
2287         default:
2288             goto unimplemented;
2289         }
2290         break;
2291 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2292     case SOL_ALG:
2293         switch (optname) {
2294         case ALG_SET_KEY:
2295         {
2296             char *alg_key = g_malloc(optlen);
2297 
2298             if (!alg_key) {
2299                 return -TARGET_ENOMEM;
2300             }
2301             if (copy_from_user(alg_key, optval_addr, optlen)) {
2302                 g_free(alg_key);
2303                 return -TARGET_EFAULT;
2304             }
2305             ret = get_errno(setsockopt(sockfd, level, optname,
2306                                        alg_key, optlen));
2307             g_free(alg_key);
2308             break;
2309         }
2310         case ALG_SET_AEAD_AUTHSIZE:
2311         {
2312             ret = get_errno(setsockopt(sockfd, level, optname,
2313                                        NULL, optlen));
2314             break;
2315         }
2316         default:
2317             goto unimplemented;
2318         }
2319         break;
2320 #endif
2321     case TARGET_SOL_SOCKET:
2322         switch (optname) {
2323         case TARGET_SO_RCVTIMEO:
2324         {
2325                 struct timeval tv;
2326 
2327                 optname = SO_RCVTIMEO;
2328 
2329 set_timeout:
2330                 if (optlen != sizeof(struct target_timeval)) {
2331                     return -TARGET_EINVAL;
2332                 }
2333 
2334                 if (copy_from_user_timeval(&tv, optval_addr)) {
2335                     return -TARGET_EFAULT;
2336                 }
2337 
2338                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2339                                 &tv, sizeof(tv)));
2340                 return ret;
2341         }
2342         case TARGET_SO_SNDTIMEO:
2343                 optname = SO_SNDTIMEO;
2344                 goto set_timeout;
2345         case TARGET_SO_ATTACH_FILTER:
2346         {
2347                 struct target_sock_fprog *tfprog;
2348                 struct target_sock_filter *tfilter;
2349                 struct sock_fprog fprog;
2350                 struct sock_filter *filter;
2351                 int i;
2352 
2353                 if (optlen != sizeof(*tfprog)) {
2354                     return -TARGET_EINVAL;
2355                 }
2356                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2357                     return -TARGET_EFAULT;
2358                 }
2359                 if (!lock_user_struct(VERIFY_READ, tfilter,
2360                                       tswapal(tfprog->filter), 0)) {
2361                     unlock_user_struct(tfprog, optval_addr, 1);
2362                     return -TARGET_EFAULT;
2363                 }
2364 
2365                 fprog.len = tswap16(tfprog->len);
2366                 filter = g_try_new(struct sock_filter, fprog.len);
2367                 if (filter == NULL) {
2368                     unlock_user_struct(tfilter, tfprog->filter, 1);
2369                     unlock_user_struct(tfprog, optval_addr, 1);
2370                     return -TARGET_ENOMEM;
2371                 }
2372                 for (i = 0; i < fprog.len; i++) {
2373                     filter[i].code = tswap16(tfilter[i].code);
2374                     filter[i].jt = tfilter[i].jt;
2375                     filter[i].jf = tfilter[i].jf;
2376                     filter[i].k = tswap32(tfilter[i].k);
2377                 }
2378                 fprog.filter = filter;
2379 
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2381                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2382                 g_free(filter);
2383 
2384                 unlock_user_struct(tfilter, tfprog->filter, 1);
2385                 unlock_user_struct(tfprog, optval_addr, 1);
2386                 return ret;
2387         }
2388 	case TARGET_SO_BINDTODEVICE:
2389 	{
2390 		char *dev_ifname, *addr_ifname;
2391 
2392 		if (optlen > IFNAMSIZ - 1) {
2393 		    optlen = IFNAMSIZ - 1;
2394 		}
2395 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2396 		if (!dev_ifname) {
2397 		    return -TARGET_EFAULT;
2398 		}
2399 		optname = SO_BINDTODEVICE;
2400 		addr_ifname = alloca(IFNAMSIZ);
2401 		memcpy(addr_ifname, dev_ifname, optlen);
2402 		addr_ifname[optlen] = 0;
2403 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2404                                            addr_ifname, optlen));
2405 		unlock_user (dev_ifname, optval_addr, 0);
2406 		return ret;
2407 	}
2408         case TARGET_SO_LINGER:
2409         {
2410                 struct linger lg;
2411                 struct target_linger *tlg;
2412 
2413                 if (optlen != sizeof(struct target_linger)) {
2414                     return -TARGET_EINVAL;
2415                 }
2416                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2417                     return -TARGET_EFAULT;
2418                 }
2419                 __get_user(lg.l_onoff, &tlg->l_onoff);
2420                 __get_user(lg.l_linger, &tlg->l_linger);
2421                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2422                                 &lg, sizeof(lg)));
2423                 unlock_user_struct(tlg, optval_addr, 0);
2424                 return ret;
2425         }
2426             /* Options with 'int' argument.  */
2427         case TARGET_SO_DEBUG:
2428 		optname = SO_DEBUG;
2429 		break;
2430         case TARGET_SO_REUSEADDR:
2431 		optname = SO_REUSEADDR;
2432 		break;
2433 #ifdef SO_REUSEPORT
2434         case TARGET_SO_REUSEPORT:
2435                 optname = SO_REUSEPORT;
2436                 break;
2437 #endif
2438         case TARGET_SO_TYPE:
2439 		optname = SO_TYPE;
2440 		break;
2441         case TARGET_SO_ERROR:
2442 		optname = SO_ERROR;
2443 		break;
2444         case TARGET_SO_DONTROUTE:
2445 		optname = SO_DONTROUTE;
2446 		break;
2447         case TARGET_SO_BROADCAST:
2448 		optname = SO_BROADCAST;
2449 		break;
2450         case TARGET_SO_SNDBUF:
2451 		optname = SO_SNDBUF;
2452 		break;
2453         case TARGET_SO_SNDBUFFORCE:
2454                 optname = SO_SNDBUFFORCE;
2455                 break;
2456         case TARGET_SO_RCVBUF:
2457 		optname = SO_RCVBUF;
2458 		break;
2459         case TARGET_SO_RCVBUFFORCE:
2460                 optname = SO_RCVBUFFORCE;
2461                 break;
2462         case TARGET_SO_KEEPALIVE:
2463 		optname = SO_KEEPALIVE;
2464 		break;
2465         case TARGET_SO_OOBINLINE:
2466 		optname = SO_OOBINLINE;
2467 		break;
2468         case TARGET_SO_NO_CHECK:
2469 		optname = SO_NO_CHECK;
2470 		break;
2471         case TARGET_SO_PRIORITY:
2472 		optname = SO_PRIORITY;
2473 		break;
2474 #ifdef SO_BSDCOMPAT
2475         case TARGET_SO_BSDCOMPAT:
2476 		optname = SO_BSDCOMPAT;
2477 		break;
2478 #endif
2479         case TARGET_SO_PASSCRED:
2480 		optname = SO_PASSCRED;
2481 		break;
2482         case TARGET_SO_PASSSEC:
2483                 optname = SO_PASSSEC;
2484                 break;
2485         case TARGET_SO_TIMESTAMP:
2486 		optname = SO_TIMESTAMP;
2487 		break;
2488         case TARGET_SO_RCVLOWAT:
2489 		optname = SO_RCVLOWAT;
2490 		break;
2491         default:
2492             goto unimplemented;
2493         }
2494 	if (optlen < sizeof(uint32_t))
2495             return -TARGET_EINVAL;
2496 
2497 	if (get_user_u32(val, optval_addr))
2498             return -TARGET_EFAULT;
2499 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2500         break;
2501 #ifdef SOL_NETLINK
2502     case SOL_NETLINK:
2503         switch (optname) {
2504         case NETLINK_PKTINFO:
2505         case NETLINK_ADD_MEMBERSHIP:
2506         case NETLINK_DROP_MEMBERSHIP:
2507         case NETLINK_BROADCAST_ERROR:
2508         case NETLINK_NO_ENOBUFS:
2509 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2510         case NETLINK_LISTEN_ALL_NSID:
2511         case NETLINK_CAP_ACK:
2512 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2514         case NETLINK_EXT_ACK:
2515 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2517         case NETLINK_GET_STRICT_CHK:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2519             break;
2520         default:
2521             goto unimplemented;
2522         }
2523         val = 0;
2524         if (optlen < sizeof(uint32_t)) {
2525             return -TARGET_EINVAL;
2526         }
2527         if (get_user_u32(val, optval_addr)) {
2528             return -TARGET_EFAULT;
2529         }
2530         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2531                                    sizeof(val)));
2532         break;
2533 #endif /* SOL_NETLINK */
2534     default:
2535     unimplemented:
2536         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2537                       level, optname);
2538         ret = -TARGET_ENOPROTOOPT;
2539     }
2540     return ret;
2541 }
2542 
2543 /* do_getsockopt() Must return target values and target errnos. */
2544 static abi_long do_getsockopt(int sockfd, int level, int optname,
2545                               abi_ulong optval_addr, abi_ulong optlen)
2546 {
2547     abi_long ret;
2548     int len, val;
2549     socklen_t lv;
2550 
2551     switch(level) {
2552     case TARGET_SOL_SOCKET:
2553         level = SOL_SOCKET;
2554         switch (optname) {
2555         /* These don't just return a single integer */
2556         case TARGET_SO_PEERNAME:
2557             goto unimplemented;
2558         case TARGET_SO_RCVTIMEO: {
2559             struct timeval tv;
2560             socklen_t tvlen;
2561 
2562             optname = SO_RCVTIMEO;
2563 
2564 get_timeout:
2565             if (get_user_u32(len, optlen)) {
2566                 return -TARGET_EFAULT;
2567             }
2568             if (len < 0) {
2569                 return -TARGET_EINVAL;
2570             }
2571 
2572             tvlen = sizeof(tv);
2573             ret = get_errno(getsockopt(sockfd, level, optname,
2574                                        &tv, &tvlen));
2575             if (ret < 0) {
2576                 return ret;
2577             }
2578             if (len > sizeof(struct target_timeval)) {
2579                 len = sizeof(struct target_timeval);
2580             }
2581             if (copy_to_user_timeval(optval_addr, &tv)) {
2582                 return -TARGET_EFAULT;
2583             }
2584             if (put_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             break;
2588         }
2589         case TARGET_SO_SNDTIMEO:
2590             optname = SO_SNDTIMEO;
2591             goto get_timeout;
2592         case TARGET_SO_PEERCRED: {
2593             struct ucred cr;
2594             socklen_t crlen;
2595             struct target_ucred *tcr;
2596 
2597             if (get_user_u32(len, optlen)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             if (len < 0) {
2601                 return -TARGET_EINVAL;
2602             }
2603 
2604             crlen = sizeof(cr);
2605             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2606                                        &cr, &crlen));
2607             if (ret < 0) {
2608                 return ret;
2609             }
2610             if (len > crlen) {
2611                 len = crlen;
2612             }
2613             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             __put_user(cr.pid, &tcr->pid);
2617             __put_user(cr.uid, &tcr->uid);
2618             __put_user(cr.gid, &tcr->gid);
2619             unlock_user_struct(tcr, optval_addr, 1);
2620             if (put_user_u32(len, optlen)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             break;
2624         }
2625         case TARGET_SO_PEERSEC: {
2626             char *name;
2627 
2628             if (get_user_u32(len, optlen)) {
2629                 return -TARGET_EFAULT;
2630             }
2631             if (len < 0) {
2632                 return -TARGET_EINVAL;
2633             }
2634             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2635             if (!name) {
2636                 return -TARGET_EFAULT;
2637             }
2638             lv = len;
2639             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2640                                        name, &lv));
2641             if (put_user_u32(lv, optlen)) {
2642                 ret = -TARGET_EFAULT;
2643             }
2644             unlock_user(name, optval_addr, lv);
2645             break;
2646         }
2647         case TARGET_SO_LINGER:
2648         {
2649             struct linger lg;
2650             socklen_t lglen;
2651             struct target_linger *tlg;
2652 
2653             if (get_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             if (len < 0) {
2657                 return -TARGET_EINVAL;
2658             }
2659 
2660             lglen = sizeof(lg);
2661             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2662                                        &lg, &lglen));
2663             if (ret < 0) {
2664                 return ret;
2665             }
2666             if (len > lglen) {
2667                 len = lglen;
2668             }
2669             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2670                 return -TARGET_EFAULT;
2671             }
2672             __put_user(lg.l_onoff, &tlg->l_onoff);
2673             __put_user(lg.l_linger, &tlg->l_linger);
2674             unlock_user_struct(tlg, optval_addr, 1);
2675             if (put_user_u32(len, optlen)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             break;
2679         }
2680         /* Options with 'int' argument.  */
2681         case TARGET_SO_DEBUG:
2682             optname = SO_DEBUG;
2683             goto int_case;
2684         case TARGET_SO_REUSEADDR:
2685             optname = SO_REUSEADDR;
2686             goto int_case;
2687 #ifdef SO_REUSEPORT
2688         case TARGET_SO_REUSEPORT:
2689             optname = SO_REUSEPORT;
2690             goto int_case;
2691 #endif
2692         case TARGET_SO_TYPE:
2693             optname = SO_TYPE;
2694             goto int_case;
2695         case TARGET_SO_ERROR:
2696             optname = SO_ERROR;
2697             goto int_case;
2698         case TARGET_SO_DONTROUTE:
2699             optname = SO_DONTROUTE;
2700             goto int_case;
2701         case TARGET_SO_BROADCAST:
2702             optname = SO_BROADCAST;
2703             goto int_case;
2704         case TARGET_SO_SNDBUF:
2705             optname = SO_SNDBUF;
2706             goto int_case;
2707         case TARGET_SO_RCVBUF:
2708             optname = SO_RCVBUF;
2709             goto int_case;
2710         case TARGET_SO_KEEPALIVE:
2711             optname = SO_KEEPALIVE;
2712             goto int_case;
2713         case TARGET_SO_OOBINLINE:
2714             optname = SO_OOBINLINE;
2715             goto int_case;
2716         case TARGET_SO_NO_CHECK:
2717             optname = SO_NO_CHECK;
2718             goto int_case;
2719         case TARGET_SO_PRIORITY:
2720             optname = SO_PRIORITY;
2721             goto int_case;
2722 #ifdef SO_BSDCOMPAT
2723         case TARGET_SO_BSDCOMPAT:
2724             optname = SO_BSDCOMPAT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_PASSCRED:
2728             optname = SO_PASSCRED;
2729             goto int_case;
2730         case TARGET_SO_TIMESTAMP:
2731             optname = SO_TIMESTAMP;
2732             goto int_case;
2733         case TARGET_SO_RCVLOWAT:
2734             optname = SO_RCVLOWAT;
2735             goto int_case;
2736         case TARGET_SO_ACCEPTCONN:
2737             optname = SO_ACCEPTCONN;
2738             goto int_case;
2739         case TARGET_SO_PROTOCOL:
2740             optname = SO_PROTOCOL;
2741             goto int_case;
2742         case TARGET_SO_DOMAIN:
2743             optname = SO_DOMAIN;
2744             goto int_case;
2745         default:
2746             goto int_case;
2747         }
2748         break;
2749     case SOL_TCP:
2750     case SOL_UDP:
2751         /* TCP and UDP options all take an 'int' value.  */
2752     int_case:
2753         if (get_user_u32(len, optlen))
2754             return -TARGET_EFAULT;
2755         if (len < 0)
2756             return -TARGET_EINVAL;
2757         lv = sizeof(lv);
2758         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2759         if (ret < 0)
2760             return ret;
2761         switch (optname) {
2762         case SO_TYPE:
2763             val = host_to_target_sock_type(val);
2764             break;
2765         case SO_ERROR:
2766             val = host_to_target_errno(val);
2767             break;
2768         }
2769         if (len > lv)
2770             len = lv;
2771         if (len == 4) {
2772             if (put_user_u32(val, optval_addr))
2773                 return -TARGET_EFAULT;
2774         } else {
2775             if (put_user_u8(val, optval_addr))
2776                 return -TARGET_EFAULT;
2777         }
2778         if (put_user_u32(len, optlen))
2779             return -TARGET_EFAULT;
2780         break;
2781     case SOL_IP:
2782         switch(optname) {
2783         case IP_TOS:
2784         case IP_TTL:
2785         case IP_HDRINCL:
2786         case IP_ROUTER_ALERT:
2787         case IP_RECVOPTS:
2788         case IP_RETOPTS:
2789         case IP_PKTINFO:
2790         case IP_MTU_DISCOVER:
2791         case IP_RECVERR:
2792         case IP_RECVTOS:
2793 #ifdef IP_FREEBIND
2794         case IP_FREEBIND:
2795 #endif
2796         case IP_MULTICAST_TTL:
2797         case IP_MULTICAST_LOOP:
2798             if (get_user_u32(len, optlen))
2799                 return -TARGET_EFAULT;
2800             if (len < 0)
2801                 return -TARGET_EINVAL;
2802             lv = sizeof(lv);
2803             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2804             if (ret < 0)
2805                 return ret;
2806             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2807                 len = 1;
2808                 if (put_user_u32(len, optlen)
2809                     || put_user_u8(val, optval_addr))
2810                     return -TARGET_EFAULT;
2811             } else {
2812                 if (len > sizeof(int))
2813                     len = sizeof(int);
2814                 if (put_user_u32(len, optlen)
2815                     || put_user_u32(val, optval_addr))
2816                     return -TARGET_EFAULT;
2817             }
2818             break;
2819         default:
2820             ret = -TARGET_ENOPROTOOPT;
2821             break;
2822         }
2823         break;
2824     case SOL_IPV6:
2825         switch (optname) {
2826         case IPV6_MTU_DISCOVER:
2827         case IPV6_MTU:
2828         case IPV6_V6ONLY:
2829         case IPV6_RECVPKTINFO:
2830         case IPV6_UNICAST_HOPS:
2831         case IPV6_MULTICAST_HOPS:
2832         case IPV6_MULTICAST_LOOP:
2833         case IPV6_RECVERR:
2834         case IPV6_RECVHOPLIMIT:
2835         case IPV6_2292HOPLIMIT:
2836         case IPV6_CHECKSUM:
2837         case IPV6_ADDRFORM:
2838         case IPV6_2292PKTINFO:
2839         case IPV6_RECVTCLASS:
2840         case IPV6_RECVRTHDR:
2841         case IPV6_2292RTHDR:
2842         case IPV6_RECVHOPOPTS:
2843         case IPV6_2292HOPOPTS:
2844         case IPV6_RECVDSTOPTS:
2845         case IPV6_2292DSTOPTS:
2846         case IPV6_TCLASS:
2847         case IPV6_ADDR_PREFERENCES:
2848 #ifdef IPV6_RECVPATHMTU
2849         case IPV6_RECVPATHMTU:
2850 #endif
2851 #ifdef IPV6_TRANSPARENT
2852         case IPV6_TRANSPARENT:
2853 #endif
2854 #ifdef IPV6_FREEBIND
2855         case IPV6_FREEBIND:
2856 #endif
2857 #ifdef IPV6_RECVORIGDSTADDR
2858         case IPV6_RECVORIGDSTADDR:
2859 #endif
2860             if (get_user_u32(len, optlen))
2861                 return -TARGET_EFAULT;
2862             if (len < 0)
2863                 return -TARGET_EINVAL;
2864             lv = sizeof(lv);
2865             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2866             if (ret < 0)
2867                 return ret;
2868             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2869                 len = 1;
2870                 if (put_user_u32(len, optlen)
2871                     || put_user_u8(val, optval_addr))
2872                     return -TARGET_EFAULT;
2873             } else {
2874                 if (len > sizeof(int))
2875                     len = sizeof(int);
2876                 if (put_user_u32(len, optlen)
2877                     || put_user_u32(val, optval_addr))
2878                     return -TARGET_EFAULT;
2879             }
2880             break;
2881         default:
2882             ret = -TARGET_ENOPROTOOPT;
2883             break;
2884         }
2885         break;
2886 #ifdef SOL_NETLINK
2887     case SOL_NETLINK:
2888         switch (optname) {
2889         case NETLINK_PKTINFO:
2890         case NETLINK_BROADCAST_ERROR:
2891         case NETLINK_NO_ENOBUFS:
2892 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2893         case NETLINK_LISTEN_ALL_NSID:
2894         case NETLINK_CAP_ACK:
2895 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2897         case NETLINK_EXT_ACK:
2898 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2900         case NETLINK_GET_STRICT_CHK:
2901 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2902             if (get_user_u32(len, optlen)) {
2903                 return -TARGET_EFAULT;
2904             }
2905             if (len != sizeof(val)) {
2906                 return -TARGET_EINVAL;
2907             }
2908             lv = len;
2909             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2910             if (ret < 0) {
2911                 return ret;
2912             }
2913             if (put_user_u32(lv, optlen)
2914                 || put_user_u32(val, optval_addr)) {
2915                 return -TARGET_EFAULT;
2916             }
2917             break;
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2919         case NETLINK_LIST_MEMBERSHIPS:
2920         {
2921             uint32_t *results;
2922             int i;
2923             if (get_user_u32(len, optlen)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             if (len < 0) {
2927                 return -TARGET_EINVAL;
2928             }
2929             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2930             if (!results && len > 0) {
2931                 return -TARGET_EFAULT;
2932             }
2933             lv = len;
2934             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2935             if (ret < 0) {
2936                 unlock_user(results, optval_addr, 0);
2937                 return ret;
2938             }
2939             /* swap host endianess to target endianess. */
2940             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2941                 results[i] = tswap32(results[i]);
2942             }
2943             if (put_user_u32(lv, optlen)) {
2944                 return -TARGET_EFAULT;
2945             }
2946             unlock_user(results, optval_addr, 0);
2947             break;
2948         }
2949 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2950         default:
2951             goto unimplemented;
2952         }
2953         break;
2954 #endif /* SOL_NETLINK */
2955     default:
2956     unimplemented:
2957         qemu_log_mask(LOG_UNIMP,
2958                       "getsockopt level=%d optname=%d not yet supported\n",
2959                       level, optname);
2960         ret = -TARGET_EOPNOTSUPP;
2961         break;
2962     }
2963     return ret;
2964 }
2965 
2966 /* Convert target low/high pair representing file offset into the host
2967  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2968  * as the kernel doesn't handle them either.
2969  */
2970 static void target_to_host_low_high(abi_ulong tlow,
2971                                     abi_ulong thigh,
2972                                     unsigned long *hlow,
2973                                     unsigned long *hhigh)
2974 {
2975     uint64_t off = tlow |
2976         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2977         TARGET_LONG_BITS / 2;
2978 
2979     *hlow = off;
2980     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2981 }
2982 
2983 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2984                                 abi_ulong count, int copy)
2985 {
2986     struct target_iovec *target_vec;
2987     struct iovec *vec;
2988     abi_ulong total_len, max_len;
2989     int i;
2990     int err = 0;
2991     bool bad_address = false;
2992 
2993     if (count == 0) {
2994         errno = 0;
2995         return NULL;
2996     }
2997     if (count > IOV_MAX) {
2998         errno = EINVAL;
2999         return NULL;
3000     }
3001 
3002     vec = g_try_new0(struct iovec, count);
3003     if (vec == NULL) {
3004         errno = ENOMEM;
3005         return NULL;
3006     }
3007 
3008     target_vec = lock_user(VERIFY_READ, target_addr,
3009                            count * sizeof(struct target_iovec), 1);
3010     if (target_vec == NULL) {
3011         err = EFAULT;
3012         goto fail2;
3013     }
3014 
3015     /* ??? If host page size > target page size, this will result in a
3016        value larger than what we can actually support.  */
3017     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3018     total_len = 0;
3019 
3020     for (i = 0; i < count; i++) {
3021         abi_ulong base = tswapal(target_vec[i].iov_base);
3022         abi_long len = tswapal(target_vec[i].iov_len);
3023 
3024         if (len < 0) {
3025             err = EINVAL;
3026             goto fail;
3027         } else if (len == 0) {
3028             /* Zero length pointer is ignored.  */
3029             vec[i].iov_base = 0;
3030         } else {
3031             vec[i].iov_base = lock_user(type, base, len, copy);
3032             /* If the first buffer pointer is bad, this is a fault.  But
3033              * subsequent bad buffers will result in a partial write; this
3034              * is realized by filling the vector with null pointers and
3035              * zero lengths. */
3036             if (!vec[i].iov_base) {
3037                 if (i == 0) {
3038                     err = EFAULT;
3039                     goto fail;
3040                 } else {
3041                     bad_address = true;
3042                 }
3043             }
3044             if (bad_address) {
3045                 len = 0;
3046             }
3047             if (len > max_len - total_len) {
3048                 len = max_len - total_len;
3049             }
3050         }
3051         vec[i].iov_len = len;
3052         total_len += len;
3053     }
3054 
3055     unlock_user(target_vec, target_addr, 0);
3056     return vec;
3057 
3058  fail:
3059     while (--i >= 0) {
3060         if (tswapal(target_vec[i].iov_len) > 0) {
3061             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3062         }
3063     }
3064     unlock_user(target_vec, target_addr, 0);
3065  fail2:
3066     g_free(vec);
3067     errno = err;
3068     return NULL;
3069 }
3070 
3071 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3072                          abi_ulong count, int copy)
3073 {
3074     struct target_iovec *target_vec;
3075     int i;
3076 
3077     target_vec = lock_user(VERIFY_READ, target_addr,
3078                            count * sizeof(struct target_iovec), 1);
3079     if (target_vec) {
3080         for (i = 0; i < count; i++) {
3081             abi_ulong base = tswapal(target_vec[i].iov_base);
3082             abi_long len = tswapal(target_vec[i].iov_len);
3083             if (len < 0) {
3084                 break;
3085             }
3086             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3087         }
3088         unlock_user(target_vec, target_addr, 0);
3089     }
3090 
3091     g_free(vec);
3092 }
3093 
3094 static inline int target_to_host_sock_type(int *type)
3095 {
3096     int host_type = 0;
3097     int target_type = *type;
3098 
3099     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3100     case TARGET_SOCK_DGRAM:
3101         host_type = SOCK_DGRAM;
3102         break;
3103     case TARGET_SOCK_STREAM:
3104         host_type = SOCK_STREAM;
3105         break;
3106     default:
3107         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3108         break;
3109     }
3110     if (target_type & TARGET_SOCK_CLOEXEC) {
3111 #if defined(SOCK_CLOEXEC)
3112         host_type |= SOCK_CLOEXEC;
3113 #else
3114         return -TARGET_EINVAL;
3115 #endif
3116     }
3117     if (target_type & TARGET_SOCK_NONBLOCK) {
3118 #if defined(SOCK_NONBLOCK)
3119         host_type |= SOCK_NONBLOCK;
3120 #elif !defined(O_NONBLOCK)
3121         return -TARGET_EINVAL;
3122 #endif
3123     }
3124     *type = host_type;
3125     return 0;
3126 }
3127 
3128 /* Try to emulate socket type flags after socket creation.  */
3129 static int sock_flags_fixup(int fd, int target_type)
3130 {
3131 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3132     if (target_type & TARGET_SOCK_NONBLOCK) {
3133         int flags = fcntl(fd, F_GETFL);
3134         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3135             close(fd);
3136             return -TARGET_EINVAL;
3137         }
3138     }
3139 #endif
3140     return fd;
3141 }
3142 
3143 /* do_socket() Must return target values and target errnos. */
3144 static abi_long do_socket(int domain, int type, int protocol)
3145 {
3146     int target_type = type;
3147     int ret;
3148 
3149     ret = target_to_host_sock_type(&type);
3150     if (ret) {
3151         return ret;
3152     }
3153 
3154     if (domain == PF_NETLINK && !(
3155 #ifdef CONFIG_RTNETLINK
3156          protocol == NETLINK_ROUTE ||
3157 #endif
3158          protocol == NETLINK_KOBJECT_UEVENT ||
3159          protocol == NETLINK_AUDIT)) {
3160         return -TARGET_EPROTONOSUPPORT;
3161     }
3162 
3163     if (domain == AF_PACKET ||
3164         (domain == AF_INET && type == SOCK_PACKET)) {
3165         protocol = tswap16(protocol);
3166     }
3167 
3168     ret = get_errno(socket(domain, type, protocol));
3169     if (ret >= 0) {
3170         ret = sock_flags_fixup(ret, target_type);
3171         if (type == SOCK_PACKET) {
3172             /* Manage an obsolete case :
3173              * if socket type is SOCK_PACKET, bind by name
3174              */
3175             fd_trans_register(ret, &target_packet_trans);
3176         } else if (domain == PF_NETLINK) {
3177             switch (protocol) {
3178 #ifdef CONFIG_RTNETLINK
3179             case NETLINK_ROUTE:
3180                 fd_trans_register(ret, &target_netlink_route_trans);
3181                 break;
3182 #endif
3183             case NETLINK_KOBJECT_UEVENT:
3184                 /* nothing to do: messages are strings */
3185                 break;
3186             case NETLINK_AUDIT:
3187                 fd_trans_register(ret, &target_netlink_audit_trans);
3188                 break;
3189             default:
3190                 g_assert_not_reached();
3191             }
3192         }
3193     }
3194     return ret;
3195 }
3196 
3197 /* do_bind() Must return target values and target errnos. */
3198 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3199                         socklen_t addrlen)
3200 {
3201     void *addr;
3202     abi_long ret;
3203 
3204     if ((int)addrlen < 0) {
3205         return -TARGET_EINVAL;
3206     }
3207 
3208     addr = alloca(addrlen+1);
3209 
3210     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3211     if (ret)
3212         return ret;
3213 
3214     return get_errno(bind(sockfd, addr, addrlen));
3215 }
3216 
3217 /* do_connect() Must return target values and target errnos. */
3218 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3219                            socklen_t addrlen)
3220 {
3221     void *addr;
3222     abi_long ret;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     addr = alloca(addrlen+1);
3229 
3230     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231     if (ret)
3232         return ret;
3233 
3234     return get_errno(safe_connect(sockfd, addr, addrlen));
3235 }
3236 
3237 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3238 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3239                                       int flags, int send)
3240 {
3241     abi_long ret, len;
3242     struct msghdr msg;
3243     abi_ulong count;
3244     struct iovec *vec;
3245     abi_ulong target_vec;
3246 
3247     if (msgp->msg_name) {
3248         msg.msg_namelen = tswap32(msgp->msg_namelen);
3249         msg.msg_name = alloca(msg.msg_namelen+1);
3250         ret = target_to_host_sockaddr(fd, msg.msg_name,
3251                                       tswapal(msgp->msg_name),
3252                                       msg.msg_namelen);
3253         if (ret == -TARGET_EFAULT) {
3254             /* For connected sockets msg_name and msg_namelen must
3255              * be ignored, so returning EFAULT immediately is wrong.
3256              * Instead, pass a bad msg_name to the host kernel, and
3257              * let it decide whether to return EFAULT or not.
3258              */
3259             msg.msg_name = (void *)-1;
3260         } else if (ret) {
3261             goto out2;
3262         }
3263     } else {
3264         msg.msg_name = NULL;
3265         msg.msg_namelen = 0;
3266     }
3267     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3268     msg.msg_control = alloca(msg.msg_controllen);
3269     memset(msg.msg_control, 0, msg.msg_controllen);
3270 
3271     msg.msg_flags = tswap32(msgp->msg_flags);
3272 
3273     count = tswapal(msgp->msg_iovlen);
3274     target_vec = tswapal(msgp->msg_iov);
3275 
3276     if (count > IOV_MAX) {
3277         /* sendrcvmsg returns a different errno for this condition than
3278          * readv/writev, so we must catch it here before lock_iovec() does.
3279          */
3280         ret = -TARGET_EMSGSIZE;
3281         goto out2;
3282     }
3283 
3284     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3285                      target_vec, count, send);
3286     if (vec == NULL) {
3287         ret = -host_to_target_errno(errno);
3288         goto out2;
3289     }
3290     msg.msg_iovlen = count;
3291     msg.msg_iov = vec;
3292 
3293     if (send) {
3294         if (fd_trans_target_to_host_data(fd)) {
3295             void *host_msg;
3296 
3297             host_msg = g_malloc(msg.msg_iov->iov_len);
3298             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3299             ret = fd_trans_target_to_host_data(fd)(host_msg,
3300                                                    msg.msg_iov->iov_len);
3301             if (ret >= 0) {
3302                 msg.msg_iov->iov_base = host_msg;
3303                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3304             }
3305             g_free(host_msg);
3306         } else {
3307             ret = target_to_host_cmsg(&msg, msgp);
3308             if (ret == 0) {
3309                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3310             }
3311         }
3312     } else {
3313         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3314         if (!is_error(ret)) {
3315             len = ret;
3316             if (fd_trans_host_to_target_data(fd)) {
3317                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3318                                                MIN(msg.msg_iov->iov_len, len));
3319             }
3320             if (!is_error(ret)) {
3321                 ret = host_to_target_cmsg(msgp, &msg);
3322             }
3323             if (!is_error(ret)) {
3324                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3325                 msgp->msg_flags = tswap32(msg.msg_flags);
3326                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3327                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3328                                     msg.msg_name, msg.msg_namelen);
3329                     if (ret) {
3330                         goto out;
3331                     }
3332                 }
3333 
3334                 ret = len;
3335             }
3336         }
3337     }
3338 
3339 out:
3340     unlock_iovec(vec, target_vec, count, !send);
3341 out2:
3342     return ret;
3343 }
3344 
3345 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3346                                int flags, int send)
3347 {
3348     abi_long ret;
3349     struct target_msghdr *msgp;
3350 
3351     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3352                           msgp,
3353                           target_msg,
3354                           send ? 1 : 0)) {
3355         return -TARGET_EFAULT;
3356     }
3357     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3358     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3359     return ret;
3360 }
3361 
3362 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3363  * so it might not have this *mmsg-specific flag either.
3364  */
3365 #ifndef MSG_WAITFORONE
3366 #define MSG_WAITFORONE 0x10000
3367 #endif
3368 
3369 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3370                                 unsigned int vlen, unsigned int flags,
3371                                 int send)
3372 {
3373     struct target_mmsghdr *mmsgp;
3374     abi_long ret = 0;
3375     int i;
3376 
3377     if (vlen > UIO_MAXIOV) {
3378         vlen = UIO_MAXIOV;
3379     }
3380 
3381     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3382     if (!mmsgp) {
3383         return -TARGET_EFAULT;
3384     }
3385 
3386     for (i = 0; i < vlen; i++) {
3387         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3388         if (is_error(ret)) {
3389             break;
3390         }
3391         mmsgp[i].msg_len = tswap32(ret);
3392         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3393         if (flags & MSG_WAITFORONE) {
3394             flags |= MSG_DONTWAIT;
3395         }
3396     }
3397 
3398     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3399 
3400     /* Return number of datagrams sent if we sent any at all;
3401      * otherwise return the error.
3402      */
3403     if (i) {
3404         return i;
3405     }
3406     return ret;
3407 }
3408 
3409 /* do_accept4() Must return target values and target errnos. */
3410 static abi_long do_accept4(int fd, abi_ulong target_addr,
3411                            abi_ulong target_addrlen_addr, int flags)
3412 {
3413     socklen_t addrlen, ret_addrlen;
3414     void *addr;
3415     abi_long ret;
3416     int host_flags;
3417 
3418     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3419 
3420     if (target_addr == 0) {
3421         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422     }
3423 
3424     /* linux returns EFAULT if addrlen pointer is invalid */
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483                                abi_ulong target_addrlen_addr)
3484 {
3485     socklen_t addrlen, ret_addrlen;
3486     void *addr;
3487     abi_long ret;
3488 
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515                               abi_ulong target_tab_addr)
3516 {
3517     int tab[2];
3518     abi_long ret;
3519 
3520     target_to_host_sock_type(&type);
3521 
3522     ret = get_errno(socketpair(domain, type, protocol, tab));
3523     if (!is_error(ret)) {
3524         if (put_user_s32(tab[0], target_tab_addr)
3525             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526             ret = -TARGET_EFAULT;
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3533                           abi_ulong target_addr, socklen_t addrlen)
3534 {
3535     void *addr;
3536     void *host_msg;
3537     void *copy_msg = NULL;
3538     abi_long ret;
3539 
3540     if ((int)addrlen < 0) {
3541         return -TARGET_EINVAL;
3542     }
3543 
3544     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3545     if (!host_msg)
3546         return -TARGET_EFAULT;
3547     if (fd_trans_target_to_host_data(fd)) {
3548         copy_msg = host_msg;
3549         host_msg = g_malloc(len);
3550         memcpy(host_msg, copy_msg, len);
3551         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3552         if (ret < 0) {
3553             goto fail;
3554         }
3555     }
3556     if (target_addr) {
3557         addr = alloca(addrlen+1);
3558         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3559         if (ret) {
3560             goto fail;
3561         }
3562         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3563     } else {
3564         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3565     }
3566 fail:
3567     if (copy_msg) {
3568         g_free(host_msg);
3569         host_msg = copy_msg;
3570     }
3571     unlock_user(host_msg, msg, 0);
3572     return ret;
3573 }
3574 
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3577                             abi_ulong target_addr,
3578                             abi_ulong target_addrlen)
3579 {
3580     socklen_t addrlen, ret_addrlen;
3581     void *addr;
3582     void *host_msg;
3583     abi_long ret;
3584 
3585     if (!msg) {
3586         host_msg = NULL;
3587     } else {
3588         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3589         if (!host_msg) {
3590             return -TARGET_EFAULT;
3591         }
3592     }
3593     if (target_addr) {
3594         if (get_user_u32(addrlen, target_addrlen)) {
3595             ret = -TARGET_EFAULT;
3596             goto fail;
3597         }
3598         if ((int)addrlen < 0) {
3599             ret = -TARGET_EINVAL;
3600             goto fail;
3601         }
3602         addr = alloca(addrlen);
3603         ret_addrlen = addrlen;
3604         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3605                                       addr, &ret_addrlen));
3606     } else {
3607         addr = NULL; /* To keep compiler quiet.  */
3608         addrlen = 0; /* To keep compiler quiet.  */
3609         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3610     }
3611     if (!is_error(ret)) {
3612         if (fd_trans_host_to_target_data(fd)) {
3613             abi_long trans;
3614             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3615             if (is_error(trans)) {
3616                 ret = trans;
3617                 goto fail;
3618             }
3619         }
3620         if (target_addr) {
3621             host_to_target_sockaddr(target_addr, addr,
3622                                     MIN(addrlen, ret_addrlen));
3623             if (put_user_u32(ret_addrlen, target_addrlen)) {
3624                 ret = -TARGET_EFAULT;
3625                 goto fail;
3626             }
3627         }
3628         unlock_user(host_msg, msg, len);
3629     } else {
3630 fail:
3631         unlock_user(host_msg, msg, 0);
3632     }
3633     return ret;
3634 }
3635 
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long do_socketcall(int num, abi_ulong vptr)
3639 {
3640     static const unsigned nargs[] = { /* number of arguments per operation */
3641         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3642         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3643         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3644         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3645         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3646         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3647         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3648         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3649         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3650         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3651         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3652         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3653         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3654         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3655         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3656         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3657         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3658         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3659         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3660         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3661     };
3662     abi_long a[6]; /* max 6 args */
3663     unsigned i;
3664 
3665     /* check the range of the first argument num */
3666     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3668         return -TARGET_EINVAL;
3669     }
3670     /* ensure we have space for args */
3671     if (nargs[num] > ARRAY_SIZE(a)) {
3672         return -TARGET_EINVAL;
3673     }
3674     /* collect the arguments in a[] according to nargs[] */
3675     for (i = 0; i < nargs[num]; ++i) {
3676         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3677             return -TARGET_EFAULT;
3678         }
3679     }
3680     /* now when we have the args, invoke the appropriate underlying function */
3681     switch (num) {
3682     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3683         return do_socket(a[0], a[1], a[2]);
3684     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3685         return do_bind(a[0], a[1], a[2]);
3686     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3687         return do_connect(a[0], a[1], a[2]);
3688     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3689         return get_errno(listen(a[0], a[1]));
3690     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3691         return do_accept4(a[0], a[1], a[2], 0);
3692     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3693         return do_getsockname(a[0], a[1], a[2]);
3694     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3695         return do_getpeername(a[0], a[1], a[2]);
3696     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3697         return do_socketpair(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3699         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3700     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3701         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3702     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3703         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3704     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3705         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3706     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3707         return get_errno(shutdown(a[0], a[1]));
3708     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3710     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3711         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3712     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3713         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3714     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3715         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3716     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3717         return do_accept4(a[0], a[1], a[2], a[3]);
3718     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3719         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3720     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3721         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3722     default:
3723         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3724         return -TARGET_EINVAL;
3725     }
3726 }
3727 #endif
3728 
3729 #define N_SHM_REGIONS	32
3730 
3731 static struct shm_region {
3732     abi_ulong start;
3733     abi_ulong size;
3734     bool in_use;
3735 } shm_regions[N_SHM_REGIONS];
3736 
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3740 {
3741   struct target_ipc_perm sem_perm;
3742   abi_ulong sem_otime;
3743 #if TARGET_ABI_BITS == 32
3744   abi_ulong __unused1;
3745 #endif
3746   abi_ulong sem_ctime;
3747 #if TARGET_ABI_BITS == 32
3748   abi_ulong __unused2;
3749 #endif
3750   abi_ulong sem_nsems;
3751   abi_ulong __unused3;
3752   abi_ulong __unused4;
3753 };
3754 #endif
3755 
3756 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3757                                                abi_ulong target_addr)
3758 {
3759     struct target_ipc_perm *target_ip;
3760     struct target_semid64_ds *target_sd;
3761 
3762     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3763         return -TARGET_EFAULT;
3764     target_ip = &(target_sd->sem_perm);
3765     host_ip->__key = tswap32(target_ip->__key);
3766     host_ip->uid = tswap32(target_ip->uid);
3767     host_ip->gid = tswap32(target_ip->gid);
3768     host_ip->cuid = tswap32(target_ip->cuid);
3769     host_ip->cgid = tswap32(target_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771     host_ip->mode = tswap32(target_ip->mode);
3772 #else
3773     host_ip->mode = tswap16(target_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776     host_ip->__seq = tswap32(target_ip->__seq);
3777 #else
3778     host_ip->__seq = tswap16(target_ip->__seq);
3779 #endif
3780     unlock_user_struct(target_sd, target_addr, 0);
3781     return 0;
3782 }
3783 
3784 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3785                                                struct ipc_perm *host_ip)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     target_ip->__key = tswap32(host_ip->__key);
3794     target_ip->uid = tswap32(host_ip->uid);
3795     target_ip->gid = tswap32(host_ip->gid);
3796     target_ip->cuid = tswap32(host_ip->cuid);
3797     target_ip->cgid = tswap32(host_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     target_ip->mode = tswap32(host_ip->mode);
3800 #else
3801     target_ip->mode = tswap16(host_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     target_ip->__seq = tswap32(host_ip->__seq);
3805 #else
3806     target_ip->__seq = tswap16(host_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813                                                abi_ulong target_addr)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818         return -TARGET_EFAULT;
3819     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820         return -TARGET_EFAULT;
3821     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 0);
3825     return 0;
3826 }
3827 
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829                                                struct semid_ds *host_sd)
3830 {
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836         return -TARGET_EFAULT;
3837     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840     unlock_user_struct(target_sd, target_addr, 1);
3841     return 0;
3842 }
3843 
3844 struct target_seminfo {
3845     int semmap;
3846     int semmni;
3847     int semmns;
3848     int semmnu;
3849     int semmsl;
3850     int semopm;
3851     int semume;
3852     int semusz;
3853     int semvmx;
3854     int semaem;
3855 };
3856 
3857 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3858                                               struct seminfo *host_seminfo)
3859 {
3860     struct target_seminfo *target_seminfo;
3861     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3864     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3865     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3866     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3867     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3868     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3869     __put_user(host_seminfo->semume, &target_seminfo->semume);
3870     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3871     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3872     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3873     unlock_user_struct(target_seminfo, target_addr, 1);
3874     return 0;
3875 }
3876 
3877 union semun {
3878 	int val;
3879 	struct semid_ds *buf;
3880 	unsigned short *array;
3881 	struct seminfo *__buf;
3882 };
3883 
3884 union target_semun {
3885 	int val;
3886 	abi_ulong buf;
3887 	abi_ulong array;
3888 	abi_ulong __buf;
3889 };
3890 
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892                                                abi_ulong target_addr)
3893 {
3894     int nsems;
3895     unsigned short *array;
3896     union semun semun;
3897     struct semid_ds semid_ds;
3898     int i, ret;
3899 
3900     semun.buf = &semid_ds;
3901 
3902     ret = semctl(semid, 0, IPC_STAT, semun);
3903     if (ret == -1)
3904         return get_errno(ret);
3905 
3906     nsems = semid_ds.sem_nsems;
3907 
3908     *host_array = g_try_new(unsigned short, nsems);
3909     if (!*host_array) {
3910         return -TARGET_ENOMEM;
3911     }
3912     array = lock_user(VERIFY_READ, target_addr,
3913                       nsems*sizeof(unsigned short), 1);
3914     if (!array) {
3915         g_free(*host_array);
3916         return -TARGET_EFAULT;
3917     }
3918 
3919     for(i=0; i<nsems; i++) {
3920         __get_user((*host_array)[i], &array[i]);
3921     }
3922     unlock_user(array, target_addr, 0);
3923 
3924     return 0;
3925 }
3926 
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928                                                unsigned short **host_array)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     array = lock_user(VERIFY_WRITE, target_addr,
3945                       nsems*sizeof(unsigned short), 0);
3946     if (!array)
3947         return -TARGET_EFAULT;
3948 
3949     for(i=0; i<nsems; i++) {
3950         __put_user((*host_array)[i], &array[i]);
3951     }
3952     g_free(*host_array);
3953     unlock_user(array, target_addr, 1);
3954 
3955     return 0;
3956 }
3957 
3958 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3959                                  abi_ulong target_arg)
3960 {
3961     union target_semun target_su = { .buf = target_arg };
3962     union semun arg;
3963     struct semid_ds dsarg;
3964     unsigned short *array = NULL;
3965     struct seminfo seminfo;
3966     abi_long ret = -TARGET_EINVAL;
3967     abi_long err;
3968     cmd &= 0xff;
3969 
3970     switch( cmd ) {
3971 	case GETVAL:
3972 	case SETVAL:
3973             /* In 64 bit cross-endian situations, we will erroneously pick up
3974              * the wrong half of the union for the "val" element.  To rectify
3975              * this, the entire 8-byte structure is byteswapped, followed by
3976 	     * a swap of the 4 byte val field. In other cases, the data is
3977 	     * already in proper host byte order. */
3978 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3979 		target_su.buf = tswapal(target_su.buf);
3980 		arg.val = tswap32(target_su.val);
3981 	    } else {
3982 		arg.val = target_su.val;
3983 	    }
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             break;
3986 	case GETALL:
3987 	case SETALL:
3988             err = target_to_host_semarray(semid, &array, target_su.array);
3989             if (err)
3990                 return err;
3991             arg.array = array;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_semarray(semid, target_su.array, &array);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_STAT:
3998 	case IPC_SET:
3999 	case SEM_STAT:
4000             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4001             if (err)
4002                 return err;
4003             arg.buf = &dsarg;
4004             ret = get_errno(semctl(semid, semnum, cmd, arg));
4005             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4006             if (err)
4007                 return err;
4008             break;
4009 	case IPC_INFO:
4010 	case SEM_INFO:
4011             arg.__buf = &seminfo;
4012             ret = get_errno(semctl(semid, semnum, cmd, arg));
4013             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4014             if (err)
4015                 return err;
4016             break;
4017 	case IPC_RMID:
4018 	case GETPID:
4019 	case GETNCNT:
4020 	case GETZCNT:
4021             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4022             break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_sembuf {
4029     unsigned short sem_num;
4030     short sem_op;
4031     short sem_flg;
4032 };
4033 
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035                                              abi_ulong target_addr,
4036                                              unsigned nsops)
4037 {
4038     struct target_sembuf *target_sembuf;
4039     int i;
4040 
4041     target_sembuf = lock_user(VERIFY_READ, target_addr,
4042                               nsops*sizeof(struct target_sembuf), 1);
4043     if (!target_sembuf)
4044         return -TARGET_EFAULT;
4045 
4046     for(i=0; i<nsops; i++) {
4047         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050     }
4051 
4052     unlock_user(target_sembuf, target_addr, 0);
4053 
4054     return 0;
4055 }
4056 
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059 
4060 /*
4061  * This macro is required to handle the s390 variants, which passes the
4062  * arguments in a different order than default.
4063  */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066   (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069   (__nsops), 0, (__sops), (__timeout)
4070 #endif
4071 
4072 static inline abi_long do_semtimedop(int semid,
4073                                      abi_long ptr,
4074                                      unsigned nsops,
4075                                      abi_long timeout, bool time64)
4076 {
4077     struct sembuf *sops;
4078     struct timespec ts, *pts = NULL;
4079     abi_long ret;
4080 
4081     if (timeout) {
4082         pts = &ts;
4083         if (time64) {
4084             if (target_to_host_timespec64(pts, timeout)) {
4085                 return -TARGET_EFAULT;
4086             }
4087         } else {
4088             if (target_to_host_timespec(pts, timeout)) {
4089                 return -TARGET_EFAULT;
4090             }
4091         }
4092     }
4093 
4094     if (nsops > TARGET_SEMOPM) {
4095         return -TARGET_E2BIG;
4096     }
4097 
4098     sops = g_new(struct sembuf, nsops);
4099 
4100     if (target_to_host_sembuf(sops, ptr, nsops)) {
4101         g_free(sops);
4102         return -TARGET_EFAULT;
4103     }
4104 
4105     ret = -TARGET_ENOSYS;
4106 #ifdef __NR_semtimedop
4107     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4108 #endif
4109 #ifdef __NR_ipc
4110     if (ret == -TARGET_ENOSYS) {
4111         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4112                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4113     }
4114 #endif
4115     g_free(sops);
4116     return ret;
4117 }
4118 #endif
4119 
4120 struct target_msqid_ds
4121 {
4122     struct target_ipc_perm msg_perm;
4123     abi_ulong msg_stime;
4124 #if TARGET_ABI_BITS == 32
4125     abi_ulong __unused1;
4126 #endif
4127     abi_ulong msg_rtime;
4128 #if TARGET_ABI_BITS == 32
4129     abi_ulong __unused2;
4130 #endif
4131     abi_ulong msg_ctime;
4132 #if TARGET_ABI_BITS == 32
4133     abi_ulong __unused3;
4134 #endif
4135     abi_ulong __msg_cbytes;
4136     abi_ulong msg_qnum;
4137     abi_ulong msg_qbytes;
4138     abi_ulong msg_lspid;
4139     abi_ulong msg_lrpid;
4140     abi_ulong __unused4;
4141     abi_ulong __unused5;
4142 };
4143 
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145                                                abi_ulong target_addr)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150         return -TARGET_EFAULT;
4151     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152         return -TARGET_EFAULT;
4153     host_md->msg_stime = tswapal(target_md->msg_stime);
4154     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 0);
4162     return 0;
4163 }
4164 
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166                                                struct msqid_ds *host_md)
4167 {
4168     struct target_msqid_ds *target_md;
4169 
4170     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173         return -TARGET_EFAULT;
4174     target_md->msg_stime = tswapal(host_md->msg_stime);
4175     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182     unlock_user_struct(target_md, target_addr, 1);
4183     return 0;
4184 }
4185 
4186 struct target_msginfo {
4187     int msgpool;
4188     int msgmap;
4189     int msgmax;
4190     int msgmnb;
4191     int msgmni;
4192     int msgssz;
4193     int msgtql;
4194     unsigned short int msgseg;
4195 };
4196 
4197 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4198                                               struct msginfo *host_msginfo)
4199 {
4200     struct target_msginfo *target_msginfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4204     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4205     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4206     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4207     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4208     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4209     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4210     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4211     unlock_user_struct(target_msginfo, target_addr, 1);
4212     return 0;
4213 }
4214 
4215 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4216 {
4217     struct msqid_ds dsarg;
4218     struct msginfo msginfo;
4219     abi_long ret = -TARGET_EINVAL;
4220 
4221     cmd &= 0xff;
4222 
4223     switch (cmd) {
4224     case IPC_STAT:
4225     case IPC_SET:
4226     case MSG_STAT:
4227         if (target_to_host_msqid_ds(&dsarg,ptr))
4228             return -TARGET_EFAULT;
4229         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4230         if (host_to_target_msqid_ds(ptr,&dsarg))
4231             return -TARGET_EFAULT;
4232         break;
4233     case IPC_RMID:
4234         ret = get_errno(msgctl(msgid, cmd, NULL));
4235         break;
4236     case IPC_INFO:
4237     case MSG_INFO:
4238         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4239         if (host_to_target_msginfo(ptr, &msginfo))
4240             return -TARGET_EFAULT;
4241         break;
4242     }
4243 
4244     return ret;
4245 }
4246 
4247 struct target_msgbuf {
4248     abi_long mtype;
4249     char	mtext[1];
4250 };
4251 
4252 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4253                                  ssize_t msgsz, int msgflg)
4254 {
4255     struct target_msgbuf *target_mb;
4256     struct msgbuf *host_mb;
4257     abi_long ret = 0;
4258 
4259     if (msgsz < 0) {
4260         return -TARGET_EINVAL;
4261     }
4262 
4263     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4264         return -TARGET_EFAULT;
4265     host_mb = g_try_malloc(msgsz + sizeof(long));
4266     if (!host_mb) {
4267         unlock_user_struct(target_mb, msgp, 0);
4268         return -TARGET_ENOMEM;
4269     }
4270     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4271     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4272     ret = -TARGET_ENOSYS;
4273 #ifdef __NR_msgsnd
4274     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 #endif
4276 #ifdef __NR_ipc
4277     if (ret == -TARGET_ENOSYS) {
4278 #ifdef __s390x__
4279         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280                                  host_mb));
4281 #else
4282         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283                                  host_mb, 0));
4284 #endif
4285     }
4286 #endif
4287     g_free(host_mb);
4288     unlock_user_struct(target_mb, msgp, 0);
4289 
4290     return ret;
4291 }
4292 
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters.  */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300     ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303     ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
4306 
4307 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4308                                  ssize_t msgsz, abi_long msgtyp,
4309                                  int msgflg)
4310 {
4311     struct target_msgbuf *target_mb;
4312     char *target_mtext;
4313     struct msgbuf *host_mb;
4314     abi_long ret = 0;
4315 
4316     if (msgsz < 0) {
4317         return -TARGET_EINVAL;
4318     }
4319 
4320     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4321         return -TARGET_EFAULT;
4322 
4323     host_mb = g_try_malloc(msgsz + sizeof(long));
4324     if (!host_mb) {
4325         ret = -TARGET_ENOMEM;
4326         goto end;
4327     }
4328     ret = -TARGET_ENOSYS;
4329 #ifdef __NR_msgrcv
4330     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 #endif
4332 #ifdef __NR_ipc
4333     if (ret == -TARGET_ENOSYS) {
4334         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4335                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4336     }
4337 #endif
4338 
4339     if (ret > 0) {
4340         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4341         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4342         if (!target_mtext) {
4343             ret = -TARGET_EFAULT;
4344             goto end;
4345         }
4346         memcpy(target_mb->mtext, host_mb->mtext, ret);
4347         unlock_user(target_mtext, target_mtext_addr, ret);
4348     }
4349 
4350     target_mb->mtype = tswapal(host_mb->mtype);
4351 
4352 end:
4353     if (target_mb)
4354         unlock_user_struct(target_mb, msgp, 1);
4355     g_free(host_mb);
4356     return ret;
4357 }
4358 
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360                                                abi_ulong target_addr)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365         return -TARGET_EFAULT;
4366     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367         return -TARGET_EFAULT;
4368     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380                                                struct shmid_ds *host_sd)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387         return -TARGET_EFAULT;
4388     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct  target_shminfo {
4400     abi_ulong shmmax;
4401     abi_ulong shmmin;
4402     abi_ulong shmmni;
4403     abi_ulong shmseg;
4404     abi_ulong shmall;
4405 };
4406 
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408                                               struct shminfo *host_shminfo)
4409 {
4410     struct target_shminfo *target_shminfo;
4411     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412         return -TARGET_EFAULT;
4413     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418     unlock_user_struct(target_shminfo, target_addr, 1);
4419     return 0;
4420 }
4421 
4422 struct target_shm_info {
4423     int used_ids;
4424     abi_ulong shm_tot;
4425     abi_ulong shm_rss;
4426     abi_ulong shm_swp;
4427     abi_ulong swap_attempts;
4428     abi_ulong swap_successes;
4429 };
4430 
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432                                                struct shm_info *host_shm_info)
4433 {
4434     struct target_shm_info *target_shm_info;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443     unlock_user_struct(target_shm_info, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449     struct shmid_ds dsarg;
4450     struct shminfo shminfo;
4451     struct shm_info shm_info;
4452     abi_long ret = -TARGET_EINVAL;
4453 
4454     cmd &= 0xff;
4455 
4456     switch(cmd) {
4457     case IPC_STAT:
4458     case IPC_SET:
4459     case SHM_STAT:
4460         if (target_to_host_shmid_ds(&dsarg, buf))
4461             return -TARGET_EFAULT;
4462         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463         if (host_to_target_shmid_ds(buf, &dsarg))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468         if (host_to_target_shminfo(buf, &shminfo))
4469             return -TARGET_EFAULT;
4470         break;
4471     case SHM_INFO:
4472         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473         if (host_to_target_shm_info(buf, &shm_info))
4474             return -TARGET_EFAULT;
4475         break;
4476     case IPC_RMID:
4477     case SHM_LOCK:
4478     case SHM_UNLOCK:
4479         ret = get_errno(shmctl(shmid, cmd, NULL));
4480         break;
4481     }
4482 
4483     return ret;
4484 }
4485 
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488  * some architectures have larger values, in which case they should
4489  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491  * and defining its own value for SHMLBA.
4492  *
4493  * The kernel also permits SHMLBA to be set by the architecture to a
4494  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495  * this means that addresses are rounded to the large size if
4496  * SHM_RND is set but addresses not aligned to that size are not rejected
4497  * as long as they are at least page-aligned. Since the only architecture
4498  * which uses this is ia64 this code doesn't provide for that oddity.
4499  */
4500 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4501 {
4502     return TARGET_PAGE_SIZE;
4503 }
4504 #endif
4505 
4506 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4507                                  int shmid, abi_ulong shmaddr, int shmflg)
4508 {
4509     CPUState *cpu = env_cpu(cpu_env);
4510     abi_long raddr;
4511     void *host_raddr;
4512     struct shmid_ds shm_info;
4513     int i,ret;
4514     abi_ulong shmlba;
4515 
4516     /* shmat pointers are always untagged */
4517 
4518     /* find out the length of the shared memory segment */
4519     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4520     if (is_error(ret)) {
4521         /* can't get length, bail out */
4522         return ret;
4523     }
4524 
4525     shmlba = target_shmlba(cpu_env);
4526 
4527     if (shmaddr & (shmlba - 1)) {
4528         if (shmflg & SHM_RND) {
4529             shmaddr &= ~(shmlba - 1);
4530         } else {
4531             return -TARGET_EINVAL;
4532         }
4533     }
4534     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4535         return -TARGET_EINVAL;
4536     }
4537 
4538     mmap_lock();
4539 
4540     /*
4541      * We're mapping shared memory, so ensure we generate code for parallel
4542      * execution and flush old translations.  This will work up to the level
4543      * supported by the host -- anything that requires EXCP_ATOMIC will not
4544      * be atomic with respect to an external process.
4545      */
4546     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4547         cpu->tcg_cflags |= CF_PARALLEL;
4548         tb_flush(cpu);
4549     }
4550 
4551     if (shmaddr)
4552         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4553     else {
4554         abi_ulong mmap_start;
4555 
4556         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4557         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4558 
4559         if (mmap_start == -1) {
4560             errno = ENOMEM;
4561             host_raddr = (void *)-1;
4562         } else
4563             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4564                                shmflg | SHM_REMAP);
4565     }
4566 
4567     if (host_raddr == (void *)-1) {
4568         mmap_unlock();
4569         return get_errno((long)host_raddr);
4570     }
4571     raddr=h2g((unsigned long)host_raddr);
4572 
4573     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4574                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4575                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4576 
4577     for (i = 0; i < N_SHM_REGIONS; i++) {
4578         if (!shm_regions[i].in_use) {
4579             shm_regions[i].in_use = true;
4580             shm_regions[i].start = raddr;
4581             shm_regions[i].size = shm_info.shm_segsz;
4582             break;
4583         }
4584     }
4585 
4586     mmap_unlock();
4587     return raddr;
4588 
4589 }
4590 
4591 static inline abi_long do_shmdt(abi_ulong shmaddr)
4592 {
4593     int i;
4594     abi_long rv;
4595 
4596     /* shmdt pointers are always untagged */
4597 
4598     mmap_lock();
4599 
4600     for (i = 0; i < N_SHM_REGIONS; ++i) {
4601         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4602             shm_regions[i].in_use = false;
4603             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4604             break;
4605         }
4606     }
4607     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4608 
4609     mmap_unlock();
4610 
4611     return rv;
4612 }
4613 
4614 #ifdef TARGET_NR_ipc
4615 /* ??? This only works with linear mappings.  */
4616 /* do_ipc() must return target values and target errnos. */
4617 static abi_long do_ipc(CPUArchState *cpu_env,
4618                        unsigned int call, abi_long first,
4619                        abi_long second, abi_long third,
4620                        abi_long ptr, abi_long fifth)
4621 {
4622     int version;
4623     abi_long ret = 0;
4624 
4625     version = call >> 16;
4626     call &= 0xffff;
4627 
4628     switch (call) {
4629     case IPCOP_semop:
4630         ret = do_semtimedop(first, ptr, second, 0, false);
4631         break;
4632     case IPCOP_semtimedop:
4633     /*
4634      * The s390 sys_ipc variant has only five parameters instead of six
4635      * (as for default variant) and the only difference is the handling of
4636      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4637      * to a struct timespec where the generic variant uses fifth parameter.
4638      */
4639 #if defined(TARGET_S390X)
4640         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4641 #else
4642         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4643 #endif
4644         break;
4645 
4646     case IPCOP_semget:
4647         ret = get_errno(semget(first, second, third));
4648         break;
4649 
4650     case IPCOP_semctl: {
4651         /* The semun argument to semctl is passed by value, so dereference the
4652          * ptr argument. */
4653         abi_ulong atptr;
4654         get_user_ual(atptr, ptr);
4655         ret = do_semctl(first, second, third, atptr);
4656         break;
4657     }
4658 
4659     case IPCOP_msgget:
4660         ret = get_errno(msgget(first, second));
4661         break;
4662 
4663     case IPCOP_msgsnd:
4664         ret = do_msgsnd(first, ptr, second, third);
4665         break;
4666 
4667     case IPCOP_msgctl:
4668         ret = do_msgctl(first, second, ptr);
4669         break;
4670 
4671     case IPCOP_msgrcv:
4672         switch (version) {
4673         case 0:
4674             {
4675                 struct target_ipc_kludge {
4676                     abi_long msgp;
4677                     abi_long msgtyp;
4678                 } *tmp;
4679 
4680                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4681                     ret = -TARGET_EFAULT;
4682                     break;
4683                 }
4684 
4685                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4686 
4687                 unlock_user_struct(tmp, ptr, 0);
4688                 break;
4689             }
4690         default:
4691             ret = do_msgrcv(first, ptr, second, fifth, third);
4692         }
4693         break;
4694 
4695     case IPCOP_shmat:
4696         switch (version) {
4697         default:
4698         {
4699             abi_ulong raddr;
4700             raddr = do_shmat(cpu_env, first, ptr, second);
4701             if (is_error(raddr))
4702                 return get_errno(raddr);
4703             if (put_user_ual(raddr, third))
4704                 return -TARGET_EFAULT;
4705             break;
4706         }
4707         case 1:
4708             ret = -TARGET_EINVAL;
4709             break;
4710         }
4711 	break;
4712     case IPCOP_shmdt:
4713         ret = do_shmdt(ptr);
4714 	break;
4715 
4716     case IPCOP_shmget:
4717 	/* IPC_* flag values are the same on all linux platforms */
4718 	ret = get_errno(shmget(first, second, third));
4719 	break;
4720 
4721 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4722     case IPCOP_shmctl:
4723         ret = do_shmctl(first, second, ptr);
4724         break;
4725     default:
4726         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4727                       call, version);
4728 	ret = -TARGET_ENOSYS;
4729 	break;
4730     }
4731     return ret;
4732 }
4733 #endif
4734 
4735 /* kernel structure types definitions */
4736 
4737 #define STRUCT(name, ...) STRUCT_ ## name,
4738 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4739 enum {
4740 #include "syscall_types.h"
4741 STRUCT_MAX
4742 };
4743 #undef STRUCT
4744 #undef STRUCT_SPECIAL
4745 
4746 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4747 #define STRUCT_SPECIAL(name)
4748 #include "syscall_types.h"
4749 #undef STRUCT
4750 #undef STRUCT_SPECIAL
4751 
4752 #define MAX_STRUCT_SIZE 4096
4753 
4754 #ifdef CONFIG_FIEMAP
4755 /* So fiemap access checks don't overflow on 32 bit systems.
4756  * This is very slightly smaller than the limit imposed by
4757  * the underlying kernel.
4758  */
4759 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4760                             / sizeof(struct fiemap_extent))
4761 
4762 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4763                                        int fd, int cmd, abi_long arg)
4764 {
4765     /* The parameter for this ioctl is a struct fiemap followed
4766      * by an array of struct fiemap_extent whose size is set
4767      * in fiemap->fm_extent_count. The array is filled in by the
4768      * ioctl.
4769      */
4770     int target_size_in, target_size_out;
4771     struct fiemap *fm;
4772     const argtype *arg_type = ie->arg_type;
4773     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4774     void *argptr, *p;
4775     abi_long ret;
4776     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4777     uint32_t outbufsz;
4778     int free_fm = 0;
4779 
4780     assert(arg_type[0] == TYPE_PTR);
4781     assert(ie->access == IOC_RW);
4782     arg_type++;
4783     target_size_in = thunk_type_size(arg_type, 0);
4784     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4785     if (!argptr) {
4786         return -TARGET_EFAULT;
4787     }
4788     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4789     unlock_user(argptr, arg, 0);
4790     fm = (struct fiemap *)buf_temp;
4791     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4792         return -TARGET_EINVAL;
4793     }
4794 
4795     outbufsz = sizeof (*fm) +
4796         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4797 
4798     if (outbufsz > MAX_STRUCT_SIZE) {
4799         /* We can't fit all the extents into the fixed size buffer.
4800          * Allocate one that is large enough and use it instead.
4801          */
4802         fm = g_try_malloc(outbufsz);
4803         if (!fm) {
4804             return -TARGET_ENOMEM;
4805         }
4806         memcpy(fm, buf_temp, sizeof(struct fiemap));
4807         free_fm = 1;
4808     }
4809     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4810     if (!is_error(ret)) {
4811         target_size_out = target_size_in;
4812         /* An extent_count of 0 means we were only counting the extents
4813          * so there are no structs to copy
4814          */
4815         if (fm->fm_extent_count != 0) {
4816             target_size_out += fm->fm_mapped_extents * extent_size;
4817         }
4818         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4819         if (!argptr) {
4820             ret = -TARGET_EFAULT;
4821         } else {
4822             /* Convert the struct fiemap */
4823             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4824             if (fm->fm_extent_count != 0) {
4825                 p = argptr + target_size_in;
4826                 /* ...and then all the struct fiemap_extents */
4827                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4828                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4829                                   THUNK_TARGET);
4830                     p += extent_size;
4831                 }
4832             }
4833             unlock_user(argptr, arg, target_size_out);
4834         }
4835     }
4836     if (free_fm) {
4837         g_free(fm);
4838     }
4839     return ret;
4840 }
4841 #endif
4842 
4843 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4844                                 int fd, int cmd, abi_long arg)
4845 {
4846     const argtype *arg_type = ie->arg_type;
4847     int target_size;
4848     void *argptr;
4849     int ret;
4850     struct ifconf *host_ifconf;
4851     uint32_t outbufsz;
4852     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4853     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4854     int target_ifreq_size;
4855     int nb_ifreq;
4856     int free_buf = 0;
4857     int i;
4858     int target_ifc_len;
4859     abi_long target_ifc_buf;
4860     int host_ifc_len;
4861     char *host_ifc_buf;
4862 
4863     assert(arg_type[0] == TYPE_PTR);
4864     assert(ie->access == IOC_RW);
4865 
4866     arg_type++;
4867     target_size = thunk_type_size(arg_type, 0);
4868 
4869     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4870     if (!argptr)
4871         return -TARGET_EFAULT;
4872     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4873     unlock_user(argptr, arg, 0);
4874 
4875     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4876     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4877     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4878 
4879     if (target_ifc_buf != 0) {
4880         target_ifc_len = host_ifconf->ifc_len;
4881         nb_ifreq = target_ifc_len / target_ifreq_size;
4882         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4883 
4884         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4885         if (outbufsz > MAX_STRUCT_SIZE) {
4886             /*
4887              * We can't fit all the extents into the fixed size buffer.
4888              * Allocate one that is large enough and use it instead.
4889              */
4890             host_ifconf = g_try_malloc(outbufsz);
4891             if (!host_ifconf) {
4892                 return -TARGET_ENOMEM;
4893             }
4894             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4895             free_buf = 1;
4896         }
4897         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4898 
4899         host_ifconf->ifc_len = host_ifc_len;
4900     } else {
4901       host_ifc_buf = NULL;
4902     }
4903     host_ifconf->ifc_buf = host_ifc_buf;
4904 
4905     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4906     if (!is_error(ret)) {
4907 	/* convert host ifc_len to target ifc_len */
4908 
4909         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4910         target_ifc_len = nb_ifreq * target_ifreq_size;
4911         host_ifconf->ifc_len = target_ifc_len;
4912 
4913 	/* restore target ifc_buf */
4914 
4915         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4916 
4917 	/* copy struct ifconf to target user */
4918 
4919         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4920         if (!argptr)
4921             return -TARGET_EFAULT;
4922         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4923         unlock_user(argptr, arg, target_size);
4924 
4925         if (target_ifc_buf != 0) {
4926             /* copy ifreq[] to target user */
4927             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4928             for (i = 0; i < nb_ifreq ; i++) {
4929                 thunk_convert(argptr + i * target_ifreq_size,
4930                               host_ifc_buf + i * sizeof(struct ifreq),
4931                               ifreq_arg_type, THUNK_TARGET);
4932             }
4933             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4934         }
4935     }
4936 
4937     if (free_buf) {
4938         g_free(host_ifconf);
4939     }
4940 
4941     return ret;
4942 }
4943 
4944 #if defined(CONFIG_USBFS)
4945 #if HOST_LONG_BITS > 64
4946 #error USBDEVFS thunks do not support >64 bit hosts yet.
4947 #endif
4948 struct live_urb {
4949     uint64_t target_urb_adr;
4950     uint64_t target_buf_adr;
4951     char *target_buf_ptr;
4952     struct usbdevfs_urb host_urb;
4953 };
4954 
4955 static GHashTable *usbdevfs_urb_hashtable(void)
4956 {
4957     static GHashTable *urb_hashtable;
4958 
4959     if (!urb_hashtable) {
4960         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4961     }
4962     return urb_hashtable;
4963 }
4964 
4965 static void urb_hashtable_insert(struct live_urb *urb)
4966 {
4967     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4968     g_hash_table_insert(urb_hashtable, urb, urb);
4969 }
4970 
4971 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4972 {
4973     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4974     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4975 }
4976 
4977 static void urb_hashtable_remove(struct live_urb *urb)
4978 {
4979     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4980     g_hash_table_remove(urb_hashtable, urb);
4981 }
4982 
4983 static abi_long
4984 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4985                           int fd, int cmd, abi_long arg)
4986 {
4987     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4988     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4989     struct live_urb *lurb;
4990     void *argptr;
4991     uint64_t hurb;
4992     int target_size;
4993     uintptr_t target_urb_adr;
4994     abi_long ret;
4995 
4996     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4997 
4998     memset(buf_temp, 0, sizeof(uint64_t));
4999     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5000     if (is_error(ret)) {
5001         return ret;
5002     }
5003 
5004     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5005     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5006     if (!lurb->target_urb_adr) {
5007         return -TARGET_EFAULT;
5008     }
5009     urb_hashtable_remove(lurb);
5010     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5011         lurb->host_urb.buffer_length);
5012     lurb->target_buf_ptr = NULL;
5013 
5014     /* restore the guest buffer pointer */
5015     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5016 
5017     /* update the guest urb struct */
5018     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5019     if (!argptr) {
5020         g_free(lurb);
5021         return -TARGET_EFAULT;
5022     }
5023     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5024     unlock_user(argptr, lurb->target_urb_adr, target_size);
5025 
5026     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5027     /* write back the urb handle */
5028     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5029     if (!argptr) {
5030         g_free(lurb);
5031         return -TARGET_EFAULT;
5032     }
5033 
5034     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5035     target_urb_adr = lurb->target_urb_adr;
5036     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5037     unlock_user(argptr, arg, target_size);
5038 
5039     g_free(lurb);
5040     return ret;
5041 }
5042 
5043 static abi_long
5044 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5045                              uint8_t *buf_temp __attribute__((unused)),
5046                              int fd, int cmd, abi_long arg)
5047 {
5048     struct live_urb *lurb;
5049 
5050     /* map target address back to host URB with metadata. */
5051     lurb = urb_hashtable_lookup(arg);
5052     if (!lurb) {
5053         return -TARGET_EFAULT;
5054     }
5055     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5056 }
5057 
5058 static abi_long
5059 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5060                             int fd, int cmd, abi_long arg)
5061 {
5062     const argtype *arg_type = ie->arg_type;
5063     int target_size;
5064     abi_long ret;
5065     void *argptr;
5066     int rw_dir;
5067     struct live_urb *lurb;
5068 
5069     /*
5070      * each submitted URB needs to map to a unique ID for the
5071      * kernel, and that unique ID needs to be a pointer to
5072      * host memory.  hence, we need to malloc for each URB.
5073      * isochronous transfers have a variable length struct.
5074      */
5075     arg_type++;
5076     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5077 
5078     /* construct host copy of urb and metadata */
5079     lurb = g_try_new0(struct live_urb, 1);
5080     if (!lurb) {
5081         return -TARGET_ENOMEM;
5082     }
5083 
5084     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5085     if (!argptr) {
5086         g_free(lurb);
5087         return -TARGET_EFAULT;
5088     }
5089     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5090     unlock_user(argptr, arg, 0);
5091 
5092     lurb->target_urb_adr = arg;
5093     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5094 
5095     /* buffer space used depends on endpoint type so lock the entire buffer */
5096     /* control type urbs should check the buffer contents for true direction */
5097     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5098     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5099         lurb->host_urb.buffer_length, 1);
5100     if (lurb->target_buf_ptr == NULL) {
5101         g_free(lurb);
5102         return -TARGET_EFAULT;
5103     }
5104 
5105     /* update buffer pointer in host copy */
5106     lurb->host_urb.buffer = lurb->target_buf_ptr;
5107 
5108     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5109     if (is_error(ret)) {
5110         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5111         g_free(lurb);
5112     } else {
5113         urb_hashtable_insert(lurb);
5114     }
5115 
5116     return ret;
5117 }
5118 #endif /* CONFIG_USBFS */
5119 
5120 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5121                             int cmd, abi_long arg)
5122 {
5123     void *argptr;
5124     struct dm_ioctl *host_dm;
5125     abi_long guest_data;
5126     uint32_t guest_data_size;
5127     int target_size;
5128     const argtype *arg_type = ie->arg_type;
5129     abi_long ret;
5130     void *big_buf = NULL;
5131     char *host_data;
5132 
5133     arg_type++;
5134     target_size = thunk_type_size(arg_type, 0);
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         ret = -TARGET_EFAULT;
5138         goto out;
5139     }
5140     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5141     unlock_user(argptr, arg, 0);
5142 
5143     /* buf_temp is too small, so fetch things into a bigger buffer */
5144     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5145     memcpy(big_buf, buf_temp, target_size);
5146     buf_temp = big_buf;
5147     host_dm = big_buf;
5148 
5149     guest_data = arg + host_dm->data_start;
5150     if ((guest_data - arg) < 0) {
5151         ret = -TARGET_EINVAL;
5152         goto out;
5153     }
5154     guest_data_size = host_dm->data_size - host_dm->data_start;
5155     host_data = (char*)host_dm + host_dm->data_start;
5156 
5157     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5158     if (!argptr) {
5159         ret = -TARGET_EFAULT;
5160         goto out;
5161     }
5162 
5163     switch (ie->host_cmd) {
5164     case DM_REMOVE_ALL:
5165     case DM_LIST_DEVICES:
5166     case DM_DEV_CREATE:
5167     case DM_DEV_REMOVE:
5168     case DM_DEV_SUSPEND:
5169     case DM_DEV_STATUS:
5170     case DM_DEV_WAIT:
5171     case DM_TABLE_STATUS:
5172     case DM_TABLE_CLEAR:
5173     case DM_TABLE_DEPS:
5174     case DM_LIST_VERSIONS:
5175         /* no input data */
5176         break;
5177     case DM_DEV_RENAME:
5178     case DM_DEV_SET_GEOMETRY:
5179         /* data contains only strings */
5180         memcpy(host_data, argptr, guest_data_size);
5181         break;
5182     case DM_TARGET_MSG:
5183         memcpy(host_data, argptr, guest_data_size);
5184         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5185         break;
5186     case DM_TABLE_LOAD:
5187     {
5188         void *gspec = argptr;
5189         void *cur_data = host_data;
5190         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5191         int spec_size = thunk_type_size(arg_type, 0);
5192         int i;
5193 
5194         for (i = 0; i < host_dm->target_count; i++) {
5195             struct dm_target_spec *spec = cur_data;
5196             uint32_t next;
5197             int slen;
5198 
5199             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5200             slen = strlen((char*)gspec + spec_size) + 1;
5201             next = spec->next;
5202             spec->next = sizeof(*spec) + slen;
5203             strcpy((char*)&spec[1], gspec + spec_size);
5204             gspec += next;
5205             cur_data += spec->next;
5206         }
5207         break;
5208     }
5209     default:
5210         ret = -TARGET_EINVAL;
5211         unlock_user(argptr, guest_data, 0);
5212         goto out;
5213     }
5214     unlock_user(argptr, guest_data, 0);
5215 
5216     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5217     if (!is_error(ret)) {
5218         guest_data = arg + host_dm->data_start;
5219         guest_data_size = host_dm->data_size - host_dm->data_start;
5220         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5221         switch (ie->host_cmd) {
5222         case DM_REMOVE_ALL:
5223         case DM_DEV_CREATE:
5224         case DM_DEV_REMOVE:
5225         case DM_DEV_RENAME:
5226         case DM_DEV_SUSPEND:
5227         case DM_DEV_STATUS:
5228         case DM_TABLE_LOAD:
5229         case DM_TABLE_CLEAR:
5230         case DM_TARGET_MSG:
5231         case DM_DEV_SET_GEOMETRY:
5232             /* no return data */
5233             break;
5234         case DM_LIST_DEVICES:
5235         {
5236             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5237             uint32_t remaining_data = guest_data_size;
5238             void *cur_data = argptr;
5239             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5240             int nl_size = 12; /* can't use thunk_size due to alignment */
5241 
5242             while (1) {
5243                 uint32_t next = nl->next;
5244                 if (next) {
5245                     nl->next = nl_size + (strlen(nl->name) + 1);
5246                 }
5247                 if (remaining_data < nl->next) {
5248                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5249                     break;
5250                 }
5251                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5252                 strcpy(cur_data + nl_size, nl->name);
5253                 cur_data += nl->next;
5254                 remaining_data -= nl->next;
5255                 if (!next) {
5256                     break;
5257                 }
5258                 nl = (void*)nl + next;
5259             }
5260             break;
5261         }
5262         case DM_DEV_WAIT:
5263         case DM_TABLE_STATUS:
5264         {
5265             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5266             void *cur_data = argptr;
5267             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5268             int spec_size = thunk_type_size(arg_type, 0);
5269             int i;
5270 
5271             for (i = 0; i < host_dm->target_count; i++) {
5272                 uint32_t next = spec->next;
5273                 int slen = strlen((char*)&spec[1]) + 1;
5274                 spec->next = (cur_data - argptr) + spec_size + slen;
5275                 if (guest_data_size < spec->next) {
5276                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277                     break;
5278                 }
5279                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5280                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5281                 cur_data = argptr + spec->next;
5282                 spec = (void*)host_dm + host_dm->data_start + next;
5283             }
5284             break;
5285         }
5286         case DM_TABLE_DEPS:
5287         {
5288             void *hdata = (void*)host_dm + host_dm->data_start;
5289             int count = *(uint32_t*)hdata;
5290             uint64_t *hdev = hdata + 8;
5291             uint64_t *gdev = argptr + 8;
5292             int i;
5293 
5294             *(uint32_t*)argptr = tswap32(count);
5295             for (i = 0; i < count; i++) {
5296                 *gdev = tswap64(*hdev);
5297                 gdev++;
5298                 hdev++;
5299             }
5300             break;
5301         }
5302         case DM_LIST_VERSIONS:
5303         {
5304             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5305             uint32_t remaining_data = guest_data_size;
5306             void *cur_data = argptr;
5307             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5308             int vers_size = thunk_type_size(arg_type, 0);
5309 
5310             while (1) {
5311                 uint32_t next = vers->next;
5312                 if (next) {
5313                     vers->next = vers_size + (strlen(vers->name) + 1);
5314                 }
5315                 if (remaining_data < vers->next) {
5316                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5317                     break;
5318                 }
5319                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5320                 strcpy(cur_data + vers_size, vers->name);
5321                 cur_data += vers->next;
5322                 remaining_data -= vers->next;
5323                 if (!next) {
5324                     break;
5325                 }
5326                 vers = (void*)vers + next;
5327             }
5328             break;
5329         }
5330         default:
5331             unlock_user(argptr, guest_data, 0);
5332             ret = -TARGET_EINVAL;
5333             goto out;
5334         }
5335         unlock_user(argptr, guest_data, guest_data_size);
5336 
5337         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5338         if (!argptr) {
5339             ret = -TARGET_EFAULT;
5340             goto out;
5341         }
5342         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5343         unlock_user(argptr, arg, target_size);
5344     }
5345 out:
5346     g_free(big_buf);
5347     return ret;
5348 }
5349 
5350 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5351                                int cmd, abi_long arg)
5352 {
5353     void *argptr;
5354     int target_size;
5355     const argtype *arg_type = ie->arg_type;
5356     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5357     abi_long ret;
5358 
5359     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5360     struct blkpg_partition host_part;
5361 
5362     /* Read and convert blkpg */
5363     arg_type++;
5364     target_size = thunk_type_size(arg_type, 0);
5365     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5366     if (!argptr) {
5367         ret = -TARGET_EFAULT;
5368         goto out;
5369     }
5370     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5371     unlock_user(argptr, arg, 0);
5372 
5373     switch (host_blkpg->op) {
5374     case BLKPG_ADD_PARTITION:
5375     case BLKPG_DEL_PARTITION:
5376         /* payload is struct blkpg_partition */
5377         break;
5378     default:
5379         /* Unknown opcode */
5380         ret = -TARGET_EINVAL;
5381         goto out;
5382     }
5383 
5384     /* Read and convert blkpg->data */
5385     arg = (abi_long)(uintptr_t)host_blkpg->data;
5386     target_size = thunk_type_size(part_arg_type, 0);
5387     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5388     if (!argptr) {
5389         ret = -TARGET_EFAULT;
5390         goto out;
5391     }
5392     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5393     unlock_user(argptr, arg, 0);
5394 
5395     /* Swizzle the data pointer to our local copy and call! */
5396     host_blkpg->data = &host_part;
5397     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5398 
5399 out:
5400     return ret;
5401 }
5402 
5403 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5404                                 int fd, int cmd, abi_long arg)
5405 {
5406     const argtype *arg_type = ie->arg_type;
5407     const StructEntry *se;
5408     const argtype *field_types;
5409     const int *dst_offsets, *src_offsets;
5410     int target_size;
5411     void *argptr;
5412     abi_ulong *target_rt_dev_ptr = NULL;
5413     unsigned long *host_rt_dev_ptr = NULL;
5414     abi_long ret;
5415     int i;
5416 
5417     assert(ie->access == IOC_W);
5418     assert(*arg_type == TYPE_PTR);
5419     arg_type++;
5420     assert(*arg_type == TYPE_STRUCT);
5421     target_size = thunk_type_size(arg_type, 0);
5422     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423     if (!argptr) {
5424         return -TARGET_EFAULT;
5425     }
5426     arg_type++;
5427     assert(*arg_type == (int)STRUCT_rtentry);
5428     se = struct_entries + *arg_type++;
5429     assert(se->convert[0] == NULL);
5430     /* convert struct here to be able to catch rt_dev string */
5431     field_types = se->field_types;
5432     dst_offsets = se->field_offsets[THUNK_HOST];
5433     src_offsets = se->field_offsets[THUNK_TARGET];
5434     for (i = 0; i < se->nb_fields; i++) {
5435         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5436             assert(*field_types == TYPE_PTRVOID);
5437             target_rt_dev_ptr = argptr + src_offsets[i];
5438             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5439             if (*target_rt_dev_ptr != 0) {
5440                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5441                                                   tswapal(*target_rt_dev_ptr));
5442                 if (!*host_rt_dev_ptr) {
5443                     unlock_user(argptr, arg, 0);
5444                     return -TARGET_EFAULT;
5445                 }
5446             } else {
5447                 *host_rt_dev_ptr = 0;
5448             }
5449             field_types++;
5450             continue;
5451         }
5452         field_types = thunk_convert(buf_temp + dst_offsets[i],
5453                                     argptr + src_offsets[i],
5454                                     field_types, THUNK_HOST);
5455     }
5456     unlock_user(argptr, arg, 0);
5457 
5458     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5459 
5460     assert(host_rt_dev_ptr != NULL);
5461     assert(target_rt_dev_ptr != NULL);
5462     if (*host_rt_dev_ptr != 0) {
5463         unlock_user((void *)*host_rt_dev_ptr,
5464                     *target_rt_dev_ptr, 0);
5465     }
5466     return ret;
5467 }
5468 
5469 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5470                                      int fd, int cmd, abi_long arg)
5471 {
5472     int sig = target_to_host_signal(arg);
5473     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5474 }
5475 
5476 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5477                                     int fd, int cmd, abi_long arg)
5478 {
5479     struct timeval tv;
5480     abi_long ret;
5481 
5482     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5483     if (is_error(ret)) {
5484         return ret;
5485     }
5486 
5487     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5488         if (copy_to_user_timeval(arg, &tv)) {
5489             return -TARGET_EFAULT;
5490         }
5491     } else {
5492         if (copy_to_user_timeval64(arg, &tv)) {
5493             return -TARGET_EFAULT;
5494         }
5495     }
5496 
5497     return ret;
5498 }
5499 
5500 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5501                                       int fd, int cmd, abi_long arg)
5502 {
5503     struct timespec ts;
5504     abi_long ret;
5505 
5506     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5507     if (is_error(ret)) {
5508         return ret;
5509     }
5510 
5511     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5512         if (host_to_target_timespec(arg, &ts)) {
5513             return -TARGET_EFAULT;
5514         }
5515     } else{
5516         if (host_to_target_timespec64(arg, &ts)) {
5517             return -TARGET_EFAULT;
5518         }
5519     }
5520 
5521     return ret;
5522 }
5523 
5524 #ifdef TIOCGPTPEER
5525 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5526                                      int fd, int cmd, abi_long arg)
5527 {
5528     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5529     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5530 }
5531 #endif
5532 
5533 #ifdef HAVE_DRM_H
5534 
5535 static void unlock_drm_version(struct drm_version *host_ver,
5536                                struct target_drm_version *target_ver,
5537                                bool copy)
5538 {
5539     unlock_user(host_ver->name, target_ver->name,
5540                                 copy ? host_ver->name_len : 0);
5541     unlock_user(host_ver->date, target_ver->date,
5542                                 copy ? host_ver->date_len : 0);
5543     unlock_user(host_ver->desc, target_ver->desc,
5544                                 copy ? host_ver->desc_len : 0);
5545 }
5546 
5547 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5548                                           struct target_drm_version *target_ver)
5549 {
5550     memset(host_ver, 0, sizeof(*host_ver));
5551 
5552     __get_user(host_ver->name_len, &target_ver->name_len);
5553     if (host_ver->name_len) {
5554         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5555                                    target_ver->name_len, 0);
5556         if (!host_ver->name) {
5557             return -EFAULT;
5558         }
5559     }
5560 
5561     __get_user(host_ver->date_len, &target_ver->date_len);
5562     if (host_ver->date_len) {
5563         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5564                                    target_ver->date_len, 0);
5565         if (!host_ver->date) {
5566             goto err;
5567         }
5568     }
5569 
5570     __get_user(host_ver->desc_len, &target_ver->desc_len);
5571     if (host_ver->desc_len) {
5572         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5573                                    target_ver->desc_len, 0);
5574         if (!host_ver->desc) {
5575             goto err;
5576         }
5577     }
5578 
5579     return 0;
5580 err:
5581     unlock_drm_version(host_ver, target_ver, false);
5582     return -EFAULT;
5583 }
5584 
5585 static inline void host_to_target_drmversion(
5586                                           struct target_drm_version *target_ver,
5587                                           struct drm_version *host_ver)
5588 {
5589     __put_user(host_ver->version_major, &target_ver->version_major);
5590     __put_user(host_ver->version_minor, &target_ver->version_minor);
5591     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5592     __put_user(host_ver->name_len, &target_ver->name_len);
5593     __put_user(host_ver->date_len, &target_ver->date_len);
5594     __put_user(host_ver->desc_len, &target_ver->desc_len);
5595     unlock_drm_version(host_ver, target_ver, true);
5596 }
5597 
5598 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5599                              int fd, int cmd, abi_long arg)
5600 {
5601     struct drm_version *ver;
5602     struct target_drm_version *target_ver;
5603     abi_long ret;
5604 
5605     switch (ie->host_cmd) {
5606     case DRM_IOCTL_VERSION:
5607         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5608             return -TARGET_EFAULT;
5609         }
5610         ver = (struct drm_version *)buf_temp;
5611         ret = target_to_host_drmversion(ver, target_ver);
5612         if (!is_error(ret)) {
5613             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5614             if (is_error(ret)) {
5615                 unlock_drm_version(ver, target_ver, false);
5616             } else {
5617                 host_to_target_drmversion(target_ver, ver);
5618             }
5619         }
5620         unlock_user_struct(target_ver, arg, 0);
5621         return ret;
5622     }
5623     return -TARGET_ENOSYS;
5624 }
5625 
5626 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5627                                            struct drm_i915_getparam *gparam,
5628                                            int fd, abi_long arg)
5629 {
5630     abi_long ret;
5631     int value;
5632     struct target_drm_i915_getparam *target_gparam;
5633 
5634     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5635         return -TARGET_EFAULT;
5636     }
5637 
5638     __get_user(gparam->param, &target_gparam->param);
5639     gparam->value = &value;
5640     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5641     put_user_s32(value, target_gparam->value);
5642 
5643     unlock_user_struct(target_gparam, arg, 0);
5644     return ret;
5645 }
5646 
5647 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5648                                   int fd, int cmd, abi_long arg)
5649 {
5650     switch (ie->host_cmd) {
5651     case DRM_IOCTL_I915_GETPARAM:
5652         return do_ioctl_drm_i915_getparam(ie,
5653                                           (struct drm_i915_getparam *)buf_temp,
5654                                           fd, arg);
5655     default:
5656         return -TARGET_ENOSYS;
5657     }
5658 }
5659 
5660 #endif
5661 
5662 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5663                                         int fd, int cmd, abi_long arg)
5664 {
5665     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5666     struct tun_filter *target_filter;
5667     char *target_addr;
5668 
5669     assert(ie->access == IOC_W);
5670 
5671     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5672     if (!target_filter) {
5673         return -TARGET_EFAULT;
5674     }
5675     filter->flags = tswap16(target_filter->flags);
5676     filter->count = tswap16(target_filter->count);
5677     unlock_user(target_filter, arg, 0);
5678 
5679     if (filter->count) {
5680         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5681             MAX_STRUCT_SIZE) {
5682             return -TARGET_EFAULT;
5683         }
5684 
5685         target_addr = lock_user(VERIFY_READ,
5686                                 arg + offsetof(struct tun_filter, addr),
5687                                 filter->count * ETH_ALEN, 1);
5688         if (!target_addr) {
5689             return -TARGET_EFAULT;
5690         }
5691         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5692         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5693     }
5694 
5695     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5696 }
5697 
5698 IOCTLEntry ioctl_entries[] = {
5699 #define IOCTL(cmd, access, ...) \
5700     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5701 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5702     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5703 #define IOCTL_IGNORE(cmd) \
5704     { TARGET_ ## cmd, 0, #cmd },
5705 #include "ioctls.h"
5706     { 0, 0, },
5707 };
5708 
5709 /* ??? Implement proper locking for ioctls.  */
5710 /* do_ioctl() Must return target values and target errnos. */
5711 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5712 {
5713     const IOCTLEntry *ie;
5714     const argtype *arg_type;
5715     abi_long ret;
5716     uint8_t buf_temp[MAX_STRUCT_SIZE];
5717     int target_size;
5718     void *argptr;
5719 
5720     ie = ioctl_entries;
5721     for(;;) {
5722         if (ie->target_cmd == 0) {
5723             qemu_log_mask(
5724                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5725             return -TARGET_ENOSYS;
5726         }
5727         if (ie->target_cmd == cmd)
5728             break;
5729         ie++;
5730     }
5731     arg_type = ie->arg_type;
5732     if (ie->do_ioctl) {
5733         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5734     } else if (!ie->host_cmd) {
5735         /* Some architectures define BSD ioctls in their headers
5736            that are not implemented in Linux.  */
5737         return -TARGET_ENOSYS;
5738     }
5739 
5740     switch(arg_type[0]) {
5741     case TYPE_NULL:
5742         /* no argument */
5743         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5744         break;
5745     case TYPE_PTRVOID:
5746     case TYPE_INT:
5747     case TYPE_LONG:
5748     case TYPE_ULONG:
5749         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5750         break;
5751     case TYPE_PTR:
5752         arg_type++;
5753         target_size = thunk_type_size(arg_type, 0);
5754         switch(ie->access) {
5755         case IOC_R:
5756             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5757             if (!is_error(ret)) {
5758                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5759                 if (!argptr)
5760                     return -TARGET_EFAULT;
5761                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5762                 unlock_user(argptr, arg, target_size);
5763             }
5764             break;
5765         case IOC_W:
5766             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5767             if (!argptr)
5768                 return -TARGET_EFAULT;
5769             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5770             unlock_user(argptr, arg, 0);
5771             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5772             break;
5773         default:
5774         case IOC_RW:
5775             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5776             if (!argptr)
5777                 return -TARGET_EFAULT;
5778             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5779             unlock_user(argptr, arg, 0);
5780             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5781             if (!is_error(ret)) {
5782                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5783                 if (!argptr)
5784                     return -TARGET_EFAULT;
5785                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5786                 unlock_user(argptr, arg, target_size);
5787             }
5788             break;
5789         }
5790         break;
5791     default:
5792         qemu_log_mask(LOG_UNIMP,
5793                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5794                       (long)cmd, arg_type[0]);
5795         ret = -TARGET_ENOSYS;
5796         break;
5797     }
5798     return ret;
5799 }
5800 
5801 static const bitmask_transtbl iflag_tbl[] = {
5802         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5803         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5804         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5805         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5806         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5807         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5808         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5809         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5810         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5811         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5812         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5813         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5814         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5815         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5816         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5817         { 0, 0, 0, 0 }
5818 };
5819 
5820 static const bitmask_transtbl oflag_tbl[] = {
5821 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5822 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5823 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5824 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5825 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5826 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5827 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5828 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5829 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5830 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5831 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5832 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5833 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5834 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5835 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5836 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5837 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5838 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5839 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5840 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5841 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5842 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5843 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5844 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5845 	{ 0, 0, 0, 0 }
5846 };
5847 
5848 static const bitmask_transtbl cflag_tbl[] = {
5849 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5850 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5851 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5852 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5853 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5854 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5855 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5856 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5857 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5858 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5859 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5860 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5861 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5862 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5863 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5864 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5865 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5866 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5867 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5868 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5869 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5870 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5871 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5872 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5873 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5874 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5875 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5876 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5877 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5878 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5879 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5880 	{ 0, 0, 0, 0 }
5881 };
5882 
5883 static const bitmask_transtbl lflag_tbl[] = {
5884   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5885   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5886   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5887   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5888   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5889   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5890   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5891   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5892   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5893   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5894   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5895   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5896   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5897   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5898   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5899   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5900   { 0, 0, 0, 0 }
5901 };
5902 
5903 static void target_to_host_termios (void *dst, const void *src)
5904 {
5905     struct host_termios *host = dst;
5906     const struct target_termios *target = src;
5907 
5908     host->c_iflag =
5909         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5910     host->c_oflag =
5911         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5912     host->c_cflag =
5913         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5914     host->c_lflag =
5915         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5916     host->c_line = target->c_line;
5917 
5918     memset(host->c_cc, 0, sizeof(host->c_cc));
5919     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5920     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5921     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5922     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5923     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5924     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5925     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5926     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5927     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5928     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5929     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5930     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5931     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5932     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5933     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5934     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5935     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5936 }
5937 
5938 static void host_to_target_termios (void *dst, const void *src)
5939 {
5940     struct target_termios *target = dst;
5941     const struct host_termios *host = src;
5942 
5943     target->c_iflag =
5944         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5945     target->c_oflag =
5946         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5947     target->c_cflag =
5948         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5949     target->c_lflag =
5950         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5951     target->c_line = host->c_line;
5952 
5953     memset(target->c_cc, 0, sizeof(target->c_cc));
5954     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5955     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5956     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5957     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5958     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5959     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5960     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5961     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5962     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5963     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5964     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5965     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5966     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5967     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5968     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5969     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5970     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5971 }
5972 
5973 static const StructEntry struct_termios_def = {
5974     .convert = { host_to_target_termios, target_to_host_termios },
5975     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5976     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5977     .print = print_termios,
5978 };
5979 
5980 static const bitmask_transtbl mmap_flags_tbl[] = {
5981     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5982     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5983     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5984     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5985       MAP_ANONYMOUS, MAP_ANONYMOUS },
5986     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5987       MAP_GROWSDOWN, MAP_GROWSDOWN },
5988     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5989       MAP_DENYWRITE, MAP_DENYWRITE },
5990     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5991       MAP_EXECUTABLE, MAP_EXECUTABLE },
5992     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5993     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5994       MAP_NORESERVE, MAP_NORESERVE },
5995     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5996     /* MAP_STACK had been ignored by the kernel for quite some time.
5997        Recognize it for the target insofar as we do not want to pass
5998        it through to the host.  */
5999     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6000     { 0, 0, 0, 0 }
6001 };
6002 
6003 /*
6004  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6005  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6006  */
6007 #if defined(TARGET_I386)
6008 
6009 /* NOTE: there is really one LDT for all the threads */
6010 static uint8_t *ldt_table;
6011 
6012 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6013 {
6014     int size;
6015     void *p;
6016 
6017     if (!ldt_table)
6018         return 0;
6019     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6020     if (size > bytecount)
6021         size = bytecount;
6022     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6023     if (!p)
6024         return -TARGET_EFAULT;
6025     /* ??? Should this by byteswapped?  */
6026     memcpy(p, ldt_table, size);
6027     unlock_user(p, ptr, size);
6028     return size;
6029 }
6030 
6031 /* XXX: add locking support */
6032 static abi_long write_ldt(CPUX86State *env,
6033                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6034 {
6035     struct target_modify_ldt_ldt_s ldt_info;
6036     struct target_modify_ldt_ldt_s *target_ldt_info;
6037     int seg_32bit, contents, read_exec_only, limit_in_pages;
6038     int seg_not_present, useable, lm;
6039     uint32_t *lp, entry_1, entry_2;
6040 
6041     if (bytecount != sizeof(ldt_info))
6042         return -TARGET_EINVAL;
6043     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6044         return -TARGET_EFAULT;
6045     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6046     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6047     ldt_info.limit = tswap32(target_ldt_info->limit);
6048     ldt_info.flags = tswap32(target_ldt_info->flags);
6049     unlock_user_struct(target_ldt_info, ptr, 0);
6050 
6051     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6052         return -TARGET_EINVAL;
6053     seg_32bit = ldt_info.flags & 1;
6054     contents = (ldt_info.flags >> 1) & 3;
6055     read_exec_only = (ldt_info.flags >> 3) & 1;
6056     limit_in_pages = (ldt_info.flags >> 4) & 1;
6057     seg_not_present = (ldt_info.flags >> 5) & 1;
6058     useable = (ldt_info.flags >> 6) & 1;
6059 #ifdef TARGET_ABI32
6060     lm = 0;
6061 #else
6062     lm = (ldt_info.flags >> 7) & 1;
6063 #endif
6064     if (contents == 3) {
6065         if (oldmode)
6066             return -TARGET_EINVAL;
6067         if (seg_not_present == 0)
6068             return -TARGET_EINVAL;
6069     }
6070     /* allocate the LDT */
6071     if (!ldt_table) {
6072         env->ldt.base = target_mmap(0,
6073                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6074                                     PROT_READ|PROT_WRITE,
6075                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6076         if (env->ldt.base == -1)
6077             return -TARGET_ENOMEM;
6078         memset(g2h_untagged(env->ldt.base), 0,
6079                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6080         env->ldt.limit = 0xffff;
6081         ldt_table = g2h_untagged(env->ldt.base);
6082     }
6083 
6084     /* NOTE: same code as Linux kernel */
6085     /* Allow LDTs to be cleared by the user. */
6086     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6087         if (oldmode ||
6088             (contents == 0		&&
6089              read_exec_only == 1	&&
6090              seg_32bit == 0		&&
6091              limit_in_pages == 0	&&
6092              seg_not_present == 1	&&
6093              useable == 0 )) {
6094             entry_1 = 0;
6095             entry_2 = 0;
6096             goto install;
6097         }
6098     }
6099 
6100     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6101         (ldt_info.limit & 0x0ffff);
6102     entry_2 = (ldt_info.base_addr & 0xff000000) |
6103         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6104         (ldt_info.limit & 0xf0000) |
6105         ((read_exec_only ^ 1) << 9) |
6106         (contents << 10) |
6107         ((seg_not_present ^ 1) << 15) |
6108         (seg_32bit << 22) |
6109         (limit_in_pages << 23) |
6110         (lm << 21) |
6111         0x7000;
6112     if (!oldmode)
6113         entry_2 |= (useable << 20);
6114 
6115     /* Install the new entry ...  */
6116 install:
6117     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6118     lp[0] = tswap32(entry_1);
6119     lp[1] = tswap32(entry_2);
6120     return 0;
6121 }
6122 
6123 /* specific and weird i386 syscalls */
6124 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6125                               unsigned long bytecount)
6126 {
6127     abi_long ret;
6128 
6129     switch (func) {
6130     case 0:
6131         ret = read_ldt(ptr, bytecount);
6132         break;
6133     case 1:
6134         ret = write_ldt(env, ptr, bytecount, 1);
6135         break;
6136     case 0x11:
6137         ret = write_ldt(env, ptr, bytecount, 0);
6138         break;
6139     default:
6140         ret = -TARGET_ENOSYS;
6141         break;
6142     }
6143     return ret;
6144 }
6145 
6146 #if defined(TARGET_ABI32)
6147 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6148 {
6149     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6150     struct target_modify_ldt_ldt_s ldt_info;
6151     struct target_modify_ldt_ldt_s *target_ldt_info;
6152     int seg_32bit, contents, read_exec_only, limit_in_pages;
6153     int seg_not_present, useable, lm;
6154     uint32_t *lp, entry_1, entry_2;
6155     int i;
6156 
6157     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6158     if (!target_ldt_info)
6159         return -TARGET_EFAULT;
6160     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6161     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6162     ldt_info.limit = tswap32(target_ldt_info->limit);
6163     ldt_info.flags = tswap32(target_ldt_info->flags);
6164     if (ldt_info.entry_number == -1) {
6165         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6166             if (gdt_table[i] == 0) {
6167                 ldt_info.entry_number = i;
6168                 target_ldt_info->entry_number = tswap32(i);
6169                 break;
6170             }
6171         }
6172     }
6173     unlock_user_struct(target_ldt_info, ptr, 1);
6174 
6175     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6176         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6177            return -TARGET_EINVAL;
6178     seg_32bit = ldt_info.flags & 1;
6179     contents = (ldt_info.flags >> 1) & 3;
6180     read_exec_only = (ldt_info.flags >> 3) & 1;
6181     limit_in_pages = (ldt_info.flags >> 4) & 1;
6182     seg_not_present = (ldt_info.flags >> 5) & 1;
6183     useable = (ldt_info.flags >> 6) & 1;
6184 #ifdef TARGET_ABI32
6185     lm = 0;
6186 #else
6187     lm = (ldt_info.flags >> 7) & 1;
6188 #endif
6189 
6190     if (contents == 3) {
6191         if (seg_not_present == 0)
6192             return -TARGET_EINVAL;
6193     }
6194 
6195     /* NOTE: same code as Linux kernel */
6196     /* Allow LDTs to be cleared by the user. */
6197     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6198         if ((contents == 0             &&
6199              read_exec_only == 1       &&
6200              seg_32bit == 0            &&
6201              limit_in_pages == 0       &&
6202              seg_not_present == 1      &&
6203              useable == 0 )) {
6204             entry_1 = 0;
6205             entry_2 = 0;
6206             goto install;
6207         }
6208     }
6209 
6210     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6211         (ldt_info.limit & 0x0ffff);
6212     entry_2 = (ldt_info.base_addr & 0xff000000) |
6213         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6214         (ldt_info.limit & 0xf0000) |
6215         ((read_exec_only ^ 1) << 9) |
6216         (contents << 10) |
6217         ((seg_not_present ^ 1) << 15) |
6218         (seg_32bit << 22) |
6219         (limit_in_pages << 23) |
6220         (useable << 20) |
6221         (lm << 21) |
6222         0x7000;
6223 
6224     /* Install the new entry ...  */
6225 install:
6226     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6227     lp[0] = tswap32(entry_1);
6228     lp[1] = tswap32(entry_2);
6229     return 0;
6230 }
6231 
6232 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6233 {
6234     struct target_modify_ldt_ldt_s *target_ldt_info;
6235     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6236     uint32_t base_addr, limit, flags;
6237     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6238     int seg_not_present, useable, lm;
6239     uint32_t *lp, entry_1, entry_2;
6240 
6241     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6242     if (!target_ldt_info)
6243         return -TARGET_EFAULT;
6244     idx = tswap32(target_ldt_info->entry_number);
6245     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6246         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6247         unlock_user_struct(target_ldt_info, ptr, 1);
6248         return -TARGET_EINVAL;
6249     }
6250     lp = (uint32_t *)(gdt_table + idx);
6251     entry_1 = tswap32(lp[0]);
6252     entry_2 = tswap32(lp[1]);
6253 
6254     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6255     contents = (entry_2 >> 10) & 3;
6256     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6257     seg_32bit = (entry_2 >> 22) & 1;
6258     limit_in_pages = (entry_2 >> 23) & 1;
6259     useable = (entry_2 >> 20) & 1;
6260 #ifdef TARGET_ABI32
6261     lm = 0;
6262 #else
6263     lm = (entry_2 >> 21) & 1;
6264 #endif
6265     flags = (seg_32bit << 0) | (contents << 1) |
6266         (read_exec_only << 3) | (limit_in_pages << 4) |
6267         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6268     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6269     base_addr = (entry_1 >> 16) |
6270         (entry_2 & 0xff000000) |
6271         ((entry_2 & 0xff) << 16);
6272     target_ldt_info->base_addr = tswapal(base_addr);
6273     target_ldt_info->limit = tswap32(limit);
6274     target_ldt_info->flags = tswap32(flags);
6275     unlock_user_struct(target_ldt_info, ptr, 1);
6276     return 0;
6277 }
6278 
6279 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6280 {
6281     return -TARGET_ENOSYS;
6282 }
6283 #else
6284 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6285 {
6286     abi_long ret = 0;
6287     abi_ulong val;
6288     int idx;
6289 
6290     switch(code) {
6291     case TARGET_ARCH_SET_GS:
6292     case TARGET_ARCH_SET_FS:
6293         if (code == TARGET_ARCH_SET_GS)
6294             idx = R_GS;
6295         else
6296             idx = R_FS;
6297         cpu_x86_load_seg(env, idx, 0);
6298         env->segs[idx].base = addr;
6299         break;
6300     case TARGET_ARCH_GET_GS:
6301     case TARGET_ARCH_GET_FS:
6302         if (code == TARGET_ARCH_GET_GS)
6303             idx = R_GS;
6304         else
6305             idx = R_FS;
6306         val = env->segs[idx].base;
6307         if (put_user(val, addr, abi_ulong))
6308             ret = -TARGET_EFAULT;
6309         break;
6310     default:
6311         ret = -TARGET_EINVAL;
6312         break;
6313     }
6314     return ret;
6315 }
6316 #endif /* defined(TARGET_ABI32 */
6317 #endif /* defined(TARGET_I386) */
6318 
6319 /*
6320  * These constants are generic.  Supply any that are missing from the host.
6321  */
6322 #ifndef PR_SET_NAME
6323 # define PR_SET_NAME    15
6324 # define PR_GET_NAME    16
6325 #endif
6326 #ifndef PR_SET_FP_MODE
6327 # define PR_SET_FP_MODE 45
6328 # define PR_GET_FP_MODE 46
6329 # define PR_FP_MODE_FR   (1 << 0)
6330 # define PR_FP_MODE_FRE  (1 << 1)
6331 #endif
6332 #ifndef PR_SVE_SET_VL
6333 # define PR_SVE_SET_VL  50
6334 # define PR_SVE_GET_VL  51
6335 # define PR_SVE_VL_LEN_MASK  0xffff
6336 # define PR_SVE_VL_INHERIT   (1 << 17)
6337 #endif
6338 #ifndef PR_PAC_RESET_KEYS
6339 # define PR_PAC_RESET_KEYS  54
6340 # define PR_PAC_APIAKEY   (1 << 0)
6341 # define PR_PAC_APIBKEY   (1 << 1)
6342 # define PR_PAC_APDAKEY   (1 << 2)
6343 # define PR_PAC_APDBKEY   (1 << 3)
6344 # define PR_PAC_APGAKEY   (1 << 4)
6345 #endif
6346 #ifndef PR_SET_TAGGED_ADDR_CTRL
6347 # define PR_SET_TAGGED_ADDR_CTRL 55
6348 # define PR_GET_TAGGED_ADDR_CTRL 56
6349 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6350 #endif
6351 #ifndef PR_MTE_TCF_SHIFT
6352 # define PR_MTE_TCF_SHIFT       1
6353 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6356 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6357 # define PR_MTE_TAG_SHIFT       3
6358 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6359 #endif
6360 #ifndef PR_SET_IO_FLUSHER
6361 # define PR_SET_IO_FLUSHER 57
6362 # define PR_GET_IO_FLUSHER 58
6363 #endif
6364 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6365 # define PR_SET_SYSCALL_USER_DISPATCH 59
6366 #endif
6367 #ifndef PR_SME_SET_VL
6368 # define PR_SME_SET_VL  63
6369 # define PR_SME_GET_VL  64
6370 # define PR_SME_VL_LEN_MASK  0xffff
6371 # define PR_SME_VL_INHERIT   (1 << 17)
6372 #endif
6373 
6374 #include "target_prctl.h"
6375 
6376 static abi_long do_prctl_inval0(CPUArchState *env)
6377 {
6378     return -TARGET_EINVAL;
6379 }
6380 
6381 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6382 {
6383     return -TARGET_EINVAL;
6384 }
6385 
6386 #ifndef do_prctl_get_fp_mode
6387 #define do_prctl_get_fp_mode do_prctl_inval0
6388 #endif
6389 #ifndef do_prctl_set_fp_mode
6390 #define do_prctl_set_fp_mode do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_sve_get_vl
6393 #define do_prctl_sve_get_vl do_prctl_inval0
6394 #endif
6395 #ifndef do_prctl_sve_set_vl
6396 #define do_prctl_sve_set_vl do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_reset_keys
6399 #define do_prctl_reset_keys do_prctl_inval1
6400 #endif
6401 #ifndef do_prctl_set_tagged_addr_ctrl
6402 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6403 #endif
6404 #ifndef do_prctl_get_tagged_addr_ctrl
6405 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6406 #endif
6407 #ifndef do_prctl_get_unalign
6408 #define do_prctl_get_unalign do_prctl_inval1
6409 #endif
6410 #ifndef do_prctl_set_unalign
6411 #define do_prctl_set_unalign do_prctl_inval1
6412 #endif
6413 #ifndef do_prctl_sme_get_vl
6414 #define do_prctl_sme_get_vl do_prctl_inval0
6415 #endif
6416 #ifndef do_prctl_sme_set_vl
6417 #define do_prctl_sme_set_vl do_prctl_inval1
6418 #endif
6419 
6420 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6421                          abi_long arg3, abi_long arg4, abi_long arg5)
6422 {
6423     abi_long ret;
6424 
6425     switch (option) {
6426     case PR_GET_PDEATHSIG:
6427         {
6428             int deathsig;
6429             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6430                                   arg3, arg4, arg5));
6431             if (!is_error(ret) &&
6432                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6433                 return -TARGET_EFAULT;
6434             }
6435             return ret;
6436         }
6437     case PR_SET_PDEATHSIG:
6438         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6439                                arg3, arg4, arg5));
6440     case PR_GET_NAME:
6441         {
6442             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6443             if (!name) {
6444                 return -TARGET_EFAULT;
6445             }
6446             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6447                                   arg3, arg4, arg5));
6448             unlock_user(name, arg2, 16);
6449             return ret;
6450         }
6451     case PR_SET_NAME:
6452         {
6453             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6454             if (!name) {
6455                 return -TARGET_EFAULT;
6456             }
6457             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6458                                   arg3, arg4, arg5));
6459             unlock_user(name, arg2, 0);
6460             return ret;
6461         }
6462     case PR_GET_FP_MODE:
6463         return do_prctl_get_fp_mode(env);
6464     case PR_SET_FP_MODE:
6465         return do_prctl_set_fp_mode(env, arg2);
6466     case PR_SVE_GET_VL:
6467         return do_prctl_sve_get_vl(env);
6468     case PR_SVE_SET_VL:
6469         return do_prctl_sve_set_vl(env, arg2);
6470     case PR_SME_GET_VL:
6471         return do_prctl_sme_get_vl(env);
6472     case PR_SME_SET_VL:
6473         return do_prctl_sme_set_vl(env, arg2);
6474     case PR_PAC_RESET_KEYS:
6475         if (arg3 || arg4 || arg5) {
6476             return -TARGET_EINVAL;
6477         }
6478         return do_prctl_reset_keys(env, arg2);
6479     case PR_SET_TAGGED_ADDR_CTRL:
6480         if (arg3 || arg4 || arg5) {
6481             return -TARGET_EINVAL;
6482         }
6483         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6484     case PR_GET_TAGGED_ADDR_CTRL:
6485         if (arg2 || arg3 || arg4 || arg5) {
6486             return -TARGET_EINVAL;
6487         }
6488         return do_prctl_get_tagged_addr_ctrl(env);
6489 
6490     case PR_GET_UNALIGN:
6491         return do_prctl_get_unalign(env, arg2);
6492     case PR_SET_UNALIGN:
6493         return do_prctl_set_unalign(env, arg2);
6494 
6495     case PR_CAP_AMBIENT:
6496     case PR_CAPBSET_READ:
6497     case PR_CAPBSET_DROP:
6498     case PR_GET_DUMPABLE:
6499     case PR_SET_DUMPABLE:
6500     case PR_GET_KEEPCAPS:
6501     case PR_SET_KEEPCAPS:
6502     case PR_GET_SECUREBITS:
6503     case PR_SET_SECUREBITS:
6504     case PR_GET_TIMING:
6505     case PR_SET_TIMING:
6506     case PR_GET_TIMERSLACK:
6507     case PR_SET_TIMERSLACK:
6508     case PR_MCE_KILL:
6509     case PR_MCE_KILL_GET:
6510     case PR_GET_NO_NEW_PRIVS:
6511     case PR_SET_NO_NEW_PRIVS:
6512     case PR_GET_IO_FLUSHER:
6513     case PR_SET_IO_FLUSHER:
6514         /* Some prctl options have no pointer arguments and we can pass on. */
6515         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6516 
6517     case PR_GET_CHILD_SUBREAPER:
6518     case PR_SET_CHILD_SUBREAPER:
6519     case PR_GET_SPECULATION_CTRL:
6520     case PR_SET_SPECULATION_CTRL:
6521     case PR_GET_TID_ADDRESS:
6522         /* TODO */
6523         return -TARGET_EINVAL;
6524 
6525     case PR_GET_FPEXC:
6526     case PR_SET_FPEXC:
6527         /* Was used for SPE on PowerPC. */
6528         return -TARGET_EINVAL;
6529 
6530     case PR_GET_ENDIAN:
6531     case PR_SET_ENDIAN:
6532     case PR_GET_FPEMU:
6533     case PR_SET_FPEMU:
6534     case PR_SET_MM:
6535     case PR_GET_SECCOMP:
6536     case PR_SET_SECCOMP:
6537     case PR_SET_SYSCALL_USER_DISPATCH:
6538     case PR_GET_THP_DISABLE:
6539     case PR_SET_THP_DISABLE:
6540     case PR_GET_TSC:
6541     case PR_SET_TSC:
6542         /* Disable to prevent the target disabling stuff we need. */
6543         return -TARGET_EINVAL;
6544 
6545     default:
6546         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6547                       option);
6548         return -TARGET_EINVAL;
6549     }
6550 }
6551 
6552 #define NEW_STACK_SIZE 0x40000
6553 
6554 
6555 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6556 typedef struct {
6557     CPUArchState *env;
6558     pthread_mutex_t mutex;
6559     pthread_cond_t cond;
6560     pthread_t thread;
6561     uint32_t tid;
6562     abi_ulong child_tidptr;
6563     abi_ulong parent_tidptr;
6564     sigset_t sigmask;
6565 } new_thread_info;
6566 
6567 static void *clone_func(void *arg)
6568 {
6569     new_thread_info *info = arg;
6570     CPUArchState *env;
6571     CPUState *cpu;
6572     TaskState *ts;
6573 
6574     rcu_register_thread();
6575     tcg_register_thread();
6576     env = info->env;
6577     cpu = env_cpu(env);
6578     thread_cpu = cpu;
6579     ts = (TaskState *)cpu->opaque;
6580     info->tid = sys_gettid();
6581     task_settid(ts);
6582     if (info->child_tidptr)
6583         put_user_u32(info->tid, info->child_tidptr);
6584     if (info->parent_tidptr)
6585         put_user_u32(info->tid, info->parent_tidptr);
6586     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6587     /* Enable signals.  */
6588     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6589     /* Signal to the parent that we're ready.  */
6590     pthread_mutex_lock(&info->mutex);
6591     pthread_cond_broadcast(&info->cond);
6592     pthread_mutex_unlock(&info->mutex);
6593     /* Wait until the parent has finished initializing the tls state.  */
6594     pthread_mutex_lock(&clone_lock);
6595     pthread_mutex_unlock(&clone_lock);
6596     cpu_loop(env);
6597     /* never exits */
6598     return NULL;
6599 }
6600 
6601 /* do_fork() Must return host values and target errnos (unlike most
6602    do_*() functions). */
6603 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6604                    abi_ulong parent_tidptr, target_ulong newtls,
6605                    abi_ulong child_tidptr)
6606 {
6607     CPUState *cpu = env_cpu(env);
6608     int ret;
6609     TaskState *ts;
6610     CPUState *new_cpu;
6611     CPUArchState *new_env;
6612     sigset_t sigmask;
6613 
6614     flags &= ~CLONE_IGNORED_FLAGS;
6615 
6616     /* Emulate vfork() with fork() */
6617     if (flags & CLONE_VFORK)
6618         flags &= ~(CLONE_VFORK | CLONE_VM);
6619 
6620     if (flags & CLONE_VM) {
6621         TaskState *parent_ts = (TaskState *)cpu->opaque;
6622         new_thread_info info;
6623         pthread_attr_t attr;
6624 
6625         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6626             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6627             return -TARGET_EINVAL;
6628         }
6629 
6630         ts = g_new0(TaskState, 1);
6631         init_task_state(ts);
6632 
6633         /* Grab a mutex so that thread setup appears atomic.  */
6634         pthread_mutex_lock(&clone_lock);
6635 
6636         /*
6637          * If this is our first additional thread, we need to ensure we
6638          * generate code for parallel execution and flush old translations.
6639          * Do this now so that the copy gets CF_PARALLEL too.
6640          */
6641         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6642             cpu->tcg_cflags |= CF_PARALLEL;
6643             tb_flush(cpu);
6644         }
6645 
6646         /* we create a new CPU instance. */
6647         new_env = cpu_copy(env);
6648         /* Init regs that differ from the parent.  */
6649         cpu_clone_regs_child(new_env, newsp, flags);
6650         cpu_clone_regs_parent(env, flags);
6651         new_cpu = env_cpu(new_env);
6652         new_cpu->opaque = ts;
6653         ts->bprm = parent_ts->bprm;
6654         ts->info = parent_ts->info;
6655         ts->signal_mask = parent_ts->signal_mask;
6656 
6657         if (flags & CLONE_CHILD_CLEARTID) {
6658             ts->child_tidptr = child_tidptr;
6659         }
6660 
6661         if (flags & CLONE_SETTLS) {
6662             cpu_set_tls (new_env, newtls);
6663         }
6664 
6665         memset(&info, 0, sizeof(info));
6666         pthread_mutex_init(&info.mutex, NULL);
6667         pthread_mutex_lock(&info.mutex);
6668         pthread_cond_init(&info.cond, NULL);
6669         info.env = new_env;
6670         if (flags & CLONE_CHILD_SETTID) {
6671             info.child_tidptr = child_tidptr;
6672         }
6673         if (flags & CLONE_PARENT_SETTID) {
6674             info.parent_tidptr = parent_tidptr;
6675         }
6676 
6677         ret = pthread_attr_init(&attr);
6678         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6679         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6680         /* It is not safe to deliver signals until the child has finished
6681            initializing, so temporarily block all signals.  */
6682         sigfillset(&sigmask);
6683         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6684         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6685 
6686         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6687         /* TODO: Free new CPU state if thread creation failed.  */
6688 
6689         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6690         pthread_attr_destroy(&attr);
6691         if (ret == 0) {
6692             /* Wait for the child to initialize.  */
6693             pthread_cond_wait(&info.cond, &info.mutex);
6694             ret = info.tid;
6695         } else {
6696             ret = -1;
6697         }
6698         pthread_mutex_unlock(&info.mutex);
6699         pthread_cond_destroy(&info.cond);
6700         pthread_mutex_destroy(&info.mutex);
6701         pthread_mutex_unlock(&clone_lock);
6702     } else {
6703         /* if no CLONE_VM, we consider it is a fork */
6704         if (flags & CLONE_INVALID_FORK_FLAGS) {
6705             return -TARGET_EINVAL;
6706         }
6707 
6708         /* We can't support custom termination signals */
6709         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6710             return -TARGET_EINVAL;
6711         }
6712 
6713         if (block_signals()) {
6714             return -QEMU_ERESTARTSYS;
6715         }
6716 
6717         fork_start();
6718         ret = fork();
6719         if (ret == 0) {
6720             /* Child Process.  */
6721             cpu_clone_regs_child(env, newsp, flags);
6722             fork_end(1);
6723             /* There is a race condition here.  The parent process could
6724                theoretically read the TID in the child process before the child
6725                tid is set.  This would require using either ptrace
6726                (not implemented) or having *_tidptr to point at a shared memory
6727                mapping.  We can't repeat the spinlock hack used above because
6728                the child process gets its own copy of the lock.  */
6729             if (flags & CLONE_CHILD_SETTID)
6730                 put_user_u32(sys_gettid(), child_tidptr);
6731             if (flags & CLONE_PARENT_SETTID)
6732                 put_user_u32(sys_gettid(), parent_tidptr);
6733             ts = (TaskState *)cpu->opaque;
6734             if (flags & CLONE_SETTLS)
6735                 cpu_set_tls (env, newtls);
6736             if (flags & CLONE_CHILD_CLEARTID)
6737                 ts->child_tidptr = child_tidptr;
6738         } else {
6739             cpu_clone_regs_parent(env, flags);
6740             fork_end(0);
6741         }
6742     }
6743     return ret;
6744 }
6745 
6746 /* warning : doesn't handle linux specific flags... */
6747 static int target_to_host_fcntl_cmd(int cmd)
6748 {
6749     int ret;
6750 
6751     switch(cmd) {
6752     case TARGET_F_DUPFD:
6753     case TARGET_F_GETFD:
6754     case TARGET_F_SETFD:
6755     case TARGET_F_GETFL:
6756     case TARGET_F_SETFL:
6757     case TARGET_F_OFD_GETLK:
6758     case TARGET_F_OFD_SETLK:
6759     case TARGET_F_OFD_SETLKW:
6760         ret = cmd;
6761         break;
6762     case TARGET_F_GETLK:
6763         ret = F_GETLK64;
6764         break;
6765     case TARGET_F_SETLK:
6766         ret = F_SETLK64;
6767         break;
6768     case TARGET_F_SETLKW:
6769         ret = F_SETLKW64;
6770         break;
6771     case TARGET_F_GETOWN:
6772         ret = F_GETOWN;
6773         break;
6774     case TARGET_F_SETOWN:
6775         ret = F_SETOWN;
6776         break;
6777     case TARGET_F_GETSIG:
6778         ret = F_GETSIG;
6779         break;
6780     case TARGET_F_SETSIG:
6781         ret = F_SETSIG;
6782         break;
6783 #if TARGET_ABI_BITS == 32
6784     case TARGET_F_GETLK64:
6785         ret = F_GETLK64;
6786         break;
6787     case TARGET_F_SETLK64:
6788         ret = F_SETLK64;
6789         break;
6790     case TARGET_F_SETLKW64:
6791         ret = F_SETLKW64;
6792         break;
6793 #endif
6794     case TARGET_F_SETLEASE:
6795         ret = F_SETLEASE;
6796         break;
6797     case TARGET_F_GETLEASE:
6798         ret = F_GETLEASE;
6799         break;
6800 #ifdef F_DUPFD_CLOEXEC
6801     case TARGET_F_DUPFD_CLOEXEC:
6802         ret = F_DUPFD_CLOEXEC;
6803         break;
6804 #endif
6805     case TARGET_F_NOTIFY:
6806         ret = F_NOTIFY;
6807         break;
6808 #ifdef F_GETOWN_EX
6809     case TARGET_F_GETOWN_EX:
6810         ret = F_GETOWN_EX;
6811         break;
6812 #endif
6813 #ifdef F_SETOWN_EX
6814     case TARGET_F_SETOWN_EX:
6815         ret = F_SETOWN_EX;
6816         break;
6817 #endif
6818 #ifdef F_SETPIPE_SZ
6819     case TARGET_F_SETPIPE_SZ:
6820         ret = F_SETPIPE_SZ;
6821         break;
6822     case TARGET_F_GETPIPE_SZ:
6823         ret = F_GETPIPE_SZ;
6824         break;
6825 #endif
6826 #ifdef F_ADD_SEALS
6827     case TARGET_F_ADD_SEALS:
6828         ret = F_ADD_SEALS;
6829         break;
6830     case TARGET_F_GET_SEALS:
6831         ret = F_GET_SEALS;
6832         break;
6833 #endif
6834     default:
6835         ret = -TARGET_EINVAL;
6836         break;
6837     }
6838 
6839 #if defined(__powerpc64__)
6840     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6841      * is not supported by kernel. The glibc fcntl call actually adjusts
6842      * them to 5, 6 and 7 before making the syscall(). Since we make the
6843      * syscall directly, adjust to what is supported by the kernel.
6844      */
6845     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6846         ret -= F_GETLK64 - 5;
6847     }
6848 #endif
6849 
6850     return ret;
6851 }
6852 
6853 #define FLOCK_TRANSTBL \
6854     switch (type) { \
6855     TRANSTBL_CONVERT(F_RDLCK); \
6856     TRANSTBL_CONVERT(F_WRLCK); \
6857     TRANSTBL_CONVERT(F_UNLCK); \
6858     }
6859 
6860 static int target_to_host_flock(int type)
6861 {
6862 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6863     FLOCK_TRANSTBL
6864 #undef  TRANSTBL_CONVERT
6865     return -TARGET_EINVAL;
6866 }
6867 
6868 static int host_to_target_flock(int type)
6869 {
6870 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6871     FLOCK_TRANSTBL
6872 #undef  TRANSTBL_CONVERT
6873     /* if we don't know how to convert the value coming
6874      * from the host we copy to the target field as-is
6875      */
6876     return type;
6877 }
6878 
6879 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6880                                             abi_ulong target_flock_addr)
6881 {
6882     struct target_flock *target_fl;
6883     int l_type;
6884 
6885     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6886         return -TARGET_EFAULT;
6887     }
6888 
6889     __get_user(l_type, &target_fl->l_type);
6890     l_type = target_to_host_flock(l_type);
6891     if (l_type < 0) {
6892         return l_type;
6893     }
6894     fl->l_type = l_type;
6895     __get_user(fl->l_whence, &target_fl->l_whence);
6896     __get_user(fl->l_start, &target_fl->l_start);
6897     __get_user(fl->l_len, &target_fl->l_len);
6898     __get_user(fl->l_pid, &target_fl->l_pid);
6899     unlock_user_struct(target_fl, target_flock_addr, 0);
6900     return 0;
6901 }
6902 
6903 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6904                                           const struct flock64 *fl)
6905 {
6906     struct target_flock *target_fl;
6907     short l_type;
6908 
6909     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6910         return -TARGET_EFAULT;
6911     }
6912 
6913     l_type = host_to_target_flock(fl->l_type);
6914     __put_user(l_type, &target_fl->l_type);
6915     __put_user(fl->l_whence, &target_fl->l_whence);
6916     __put_user(fl->l_start, &target_fl->l_start);
6917     __put_user(fl->l_len, &target_fl->l_len);
6918     __put_user(fl->l_pid, &target_fl->l_pid);
6919     unlock_user_struct(target_fl, target_flock_addr, 1);
6920     return 0;
6921 }
6922 
6923 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6924 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6925 
6926 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6927 struct target_oabi_flock64 {
6928     abi_short l_type;
6929     abi_short l_whence;
6930     abi_llong l_start;
6931     abi_llong l_len;
6932     abi_int   l_pid;
6933 } QEMU_PACKED;
6934 
6935 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6936                                                    abi_ulong target_flock_addr)
6937 {
6938     struct target_oabi_flock64 *target_fl;
6939     int l_type;
6940 
6941     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6942         return -TARGET_EFAULT;
6943     }
6944 
6945     __get_user(l_type, &target_fl->l_type);
6946     l_type = target_to_host_flock(l_type);
6947     if (l_type < 0) {
6948         return l_type;
6949     }
6950     fl->l_type = l_type;
6951     __get_user(fl->l_whence, &target_fl->l_whence);
6952     __get_user(fl->l_start, &target_fl->l_start);
6953     __get_user(fl->l_len, &target_fl->l_len);
6954     __get_user(fl->l_pid, &target_fl->l_pid);
6955     unlock_user_struct(target_fl, target_flock_addr, 0);
6956     return 0;
6957 }
6958 
6959 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6960                                                  const struct flock64 *fl)
6961 {
6962     struct target_oabi_flock64 *target_fl;
6963     short l_type;
6964 
6965     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6966         return -TARGET_EFAULT;
6967     }
6968 
6969     l_type = host_to_target_flock(fl->l_type);
6970     __put_user(l_type, &target_fl->l_type);
6971     __put_user(fl->l_whence, &target_fl->l_whence);
6972     __put_user(fl->l_start, &target_fl->l_start);
6973     __put_user(fl->l_len, &target_fl->l_len);
6974     __put_user(fl->l_pid, &target_fl->l_pid);
6975     unlock_user_struct(target_fl, target_flock_addr, 1);
6976     return 0;
6977 }
6978 #endif
6979 
6980 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6981                                               abi_ulong target_flock_addr)
6982 {
6983     struct target_flock64 *target_fl;
6984     int l_type;
6985 
6986     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6987         return -TARGET_EFAULT;
6988     }
6989 
6990     __get_user(l_type, &target_fl->l_type);
6991     l_type = target_to_host_flock(l_type);
6992     if (l_type < 0) {
6993         return l_type;
6994     }
6995     fl->l_type = l_type;
6996     __get_user(fl->l_whence, &target_fl->l_whence);
6997     __get_user(fl->l_start, &target_fl->l_start);
6998     __get_user(fl->l_len, &target_fl->l_len);
6999     __get_user(fl->l_pid, &target_fl->l_pid);
7000     unlock_user_struct(target_fl, target_flock_addr, 0);
7001     return 0;
7002 }
7003 
7004 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7005                                             const struct flock64 *fl)
7006 {
7007     struct target_flock64 *target_fl;
7008     short l_type;
7009 
7010     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7011         return -TARGET_EFAULT;
7012     }
7013 
7014     l_type = host_to_target_flock(fl->l_type);
7015     __put_user(l_type, &target_fl->l_type);
7016     __put_user(fl->l_whence, &target_fl->l_whence);
7017     __put_user(fl->l_start, &target_fl->l_start);
7018     __put_user(fl->l_len, &target_fl->l_len);
7019     __put_user(fl->l_pid, &target_fl->l_pid);
7020     unlock_user_struct(target_fl, target_flock_addr, 1);
7021     return 0;
7022 }
7023 
7024 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7025 {
7026     struct flock64 fl64;
7027 #ifdef F_GETOWN_EX
7028     struct f_owner_ex fox;
7029     struct target_f_owner_ex *target_fox;
7030 #endif
7031     abi_long ret;
7032     int host_cmd = target_to_host_fcntl_cmd(cmd);
7033 
7034     if (host_cmd == -TARGET_EINVAL)
7035 	    return host_cmd;
7036 
7037     switch(cmd) {
7038     case TARGET_F_GETLK:
7039         ret = copy_from_user_flock(&fl64, arg);
7040         if (ret) {
7041             return ret;
7042         }
7043         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7044         if (ret == 0) {
7045             ret = copy_to_user_flock(arg, &fl64);
7046         }
7047         break;
7048 
7049     case TARGET_F_SETLK:
7050     case TARGET_F_SETLKW:
7051         ret = copy_from_user_flock(&fl64, arg);
7052         if (ret) {
7053             return ret;
7054         }
7055         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7056         break;
7057 
7058     case TARGET_F_GETLK64:
7059     case TARGET_F_OFD_GETLK:
7060         ret = copy_from_user_flock64(&fl64, arg);
7061         if (ret) {
7062             return ret;
7063         }
7064         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7065         if (ret == 0) {
7066             ret = copy_to_user_flock64(arg, &fl64);
7067         }
7068         break;
7069     case TARGET_F_SETLK64:
7070     case TARGET_F_SETLKW64:
7071     case TARGET_F_OFD_SETLK:
7072     case TARGET_F_OFD_SETLKW:
7073         ret = copy_from_user_flock64(&fl64, arg);
7074         if (ret) {
7075             return ret;
7076         }
7077         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7078         break;
7079 
7080     case TARGET_F_GETFL:
7081         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7082         if (ret >= 0) {
7083             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7084         }
7085         break;
7086 
7087     case TARGET_F_SETFL:
7088         ret = get_errno(safe_fcntl(fd, host_cmd,
7089                                    target_to_host_bitmask(arg,
7090                                                           fcntl_flags_tbl)));
7091         break;
7092 
7093 #ifdef F_GETOWN_EX
7094     case TARGET_F_GETOWN_EX:
7095         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7096         if (ret >= 0) {
7097             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7098                 return -TARGET_EFAULT;
7099             target_fox->type = tswap32(fox.type);
7100             target_fox->pid = tswap32(fox.pid);
7101             unlock_user_struct(target_fox, arg, 1);
7102         }
7103         break;
7104 #endif
7105 
7106 #ifdef F_SETOWN_EX
7107     case TARGET_F_SETOWN_EX:
7108         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7109             return -TARGET_EFAULT;
7110         fox.type = tswap32(target_fox->type);
7111         fox.pid = tswap32(target_fox->pid);
7112         unlock_user_struct(target_fox, arg, 0);
7113         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7114         break;
7115 #endif
7116 
7117     case TARGET_F_SETSIG:
7118         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7119         break;
7120 
7121     case TARGET_F_GETSIG:
7122         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7123         break;
7124 
7125     case TARGET_F_SETOWN:
7126     case TARGET_F_GETOWN:
7127     case TARGET_F_SETLEASE:
7128     case TARGET_F_GETLEASE:
7129     case TARGET_F_SETPIPE_SZ:
7130     case TARGET_F_GETPIPE_SZ:
7131     case TARGET_F_ADD_SEALS:
7132     case TARGET_F_GET_SEALS:
7133         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7134         break;
7135 
7136     default:
7137         ret = get_errno(safe_fcntl(fd, cmd, arg));
7138         break;
7139     }
7140     return ret;
7141 }
7142 
7143 #ifdef USE_UID16
7144 
7145 static inline int high2lowuid(int uid)
7146 {
7147     if (uid > 65535)
7148         return 65534;
7149     else
7150         return uid;
7151 }
7152 
7153 static inline int high2lowgid(int gid)
7154 {
7155     if (gid > 65535)
7156         return 65534;
7157     else
7158         return gid;
7159 }
7160 
7161 static inline int low2highuid(int uid)
7162 {
7163     if ((int16_t)uid == -1)
7164         return -1;
7165     else
7166         return uid;
7167 }
7168 
7169 static inline int low2highgid(int gid)
7170 {
7171     if ((int16_t)gid == -1)
7172         return -1;
7173     else
7174         return gid;
7175 }
7176 static inline int tswapid(int id)
7177 {
7178     return tswap16(id);
7179 }
7180 
7181 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7182 
7183 #else /* !USE_UID16 */
7184 static inline int high2lowuid(int uid)
7185 {
7186     return uid;
7187 }
7188 static inline int high2lowgid(int gid)
7189 {
7190     return gid;
7191 }
7192 static inline int low2highuid(int uid)
7193 {
7194     return uid;
7195 }
7196 static inline int low2highgid(int gid)
7197 {
7198     return gid;
7199 }
7200 static inline int tswapid(int id)
7201 {
7202     return tswap32(id);
7203 }
7204 
7205 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7206 
7207 #endif /* USE_UID16 */
7208 
7209 /* We must do direct syscalls for setting UID/GID, because we want to
7210  * implement the Linux system call semantics of "change only for this thread",
7211  * not the libc/POSIX semantics of "change for all threads in process".
7212  * (See http://ewontfix.com/17/ for more details.)
7213  * We use the 32-bit version of the syscalls if present; if it is not
7214  * then either the host architecture supports 32-bit UIDs natively with
7215  * the standard syscall, or the 16-bit UID is the best we can do.
7216  */
7217 #ifdef __NR_setuid32
7218 #define __NR_sys_setuid __NR_setuid32
7219 #else
7220 #define __NR_sys_setuid __NR_setuid
7221 #endif
7222 #ifdef __NR_setgid32
7223 #define __NR_sys_setgid __NR_setgid32
7224 #else
7225 #define __NR_sys_setgid __NR_setgid
7226 #endif
7227 #ifdef __NR_setresuid32
7228 #define __NR_sys_setresuid __NR_setresuid32
7229 #else
7230 #define __NR_sys_setresuid __NR_setresuid
7231 #endif
7232 #ifdef __NR_setresgid32
7233 #define __NR_sys_setresgid __NR_setresgid32
7234 #else
7235 #define __NR_sys_setresgid __NR_setresgid
7236 #endif
7237 
7238 _syscall1(int, sys_setuid, uid_t, uid)
7239 _syscall1(int, sys_setgid, gid_t, gid)
7240 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7241 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7242 
7243 void syscall_init(void)
7244 {
7245     IOCTLEntry *ie;
7246     const argtype *arg_type;
7247     int size;
7248 
7249     thunk_init(STRUCT_MAX);
7250 
7251 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7252 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7253 #include "syscall_types.h"
7254 #undef STRUCT
7255 #undef STRUCT_SPECIAL
7256 
7257     /* we patch the ioctl size if necessary. We rely on the fact that
7258        no ioctl has all the bits at '1' in the size field */
7259     ie = ioctl_entries;
7260     while (ie->target_cmd != 0) {
7261         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7262             TARGET_IOC_SIZEMASK) {
7263             arg_type = ie->arg_type;
7264             if (arg_type[0] != TYPE_PTR) {
7265                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7266                         ie->target_cmd);
7267                 exit(1);
7268             }
7269             arg_type++;
7270             size = thunk_type_size(arg_type, 0);
7271             ie->target_cmd = (ie->target_cmd &
7272                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7273                 (size << TARGET_IOC_SIZESHIFT);
7274         }
7275 
7276         /* automatic consistency check if same arch */
7277 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7278     (defined(__x86_64__) && defined(TARGET_X86_64))
7279         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7280             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7281                     ie->name, ie->target_cmd, ie->host_cmd);
7282         }
7283 #endif
7284         ie++;
7285     }
7286 }
7287 
7288 #ifdef TARGET_NR_truncate64
7289 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7290                                          abi_long arg2,
7291                                          abi_long arg3,
7292                                          abi_long arg4)
7293 {
7294     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7295         arg2 = arg3;
7296         arg3 = arg4;
7297     }
7298     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7299 }
7300 #endif
7301 
7302 #ifdef TARGET_NR_ftruncate64
7303 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7304                                           abi_long arg2,
7305                                           abi_long arg3,
7306                                           abi_long arg4)
7307 {
7308     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7309         arg2 = arg3;
7310         arg3 = arg4;
7311     }
7312     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7313 }
7314 #endif
7315 
7316 #if defined(TARGET_NR_timer_settime) || \
7317     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7318 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7319                                                  abi_ulong target_addr)
7320 {
7321     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7322                                 offsetof(struct target_itimerspec,
7323                                          it_interval)) ||
7324         target_to_host_timespec(&host_its->it_value, target_addr +
7325                                 offsetof(struct target_itimerspec,
7326                                          it_value))) {
7327         return -TARGET_EFAULT;
7328     }
7329 
7330     return 0;
7331 }
7332 #endif
7333 
7334 #if defined(TARGET_NR_timer_settime64) || \
7335     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7336 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7337                                                    abi_ulong target_addr)
7338 {
7339     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7340                                   offsetof(struct target__kernel_itimerspec,
7341                                            it_interval)) ||
7342         target_to_host_timespec64(&host_its->it_value, target_addr +
7343                                   offsetof(struct target__kernel_itimerspec,
7344                                            it_value))) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     return 0;
7349 }
7350 #endif
7351 
7352 #if ((defined(TARGET_NR_timerfd_gettime) || \
7353       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7354       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7355 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7356                                                  struct itimerspec *host_its)
7357 {
7358     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7359                                                        it_interval),
7360                                 &host_its->it_interval) ||
7361         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7362                                                        it_value),
7363                                 &host_its->it_value)) {
7364         return -TARGET_EFAULT;
7365     }
7366     return 0;
7367 }
7368 #endif
7369 
7370 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7371       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7372       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7373 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7374                                                    struct itimerspec *host_its)
7375 {
7376     if (host_to_target_timespec64(target_addr +
7377                                   offsetof(struct target__kernel_itimerspec,
7378                                            it_interval),
7379                                   &host_its->it_interval) ||
7380         host_to_target_timespec64(target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_value),
7383                                   &host_its->it_value)) {
7384         return -TARGET_EFAULT;
7385     }
7386     return 0;
7387 }
7388 #endif
7389 
7390 #if defined(TARGET_NR_adjtimex) || \
7391     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7392 static inline abi_long target_to_host_timex(struct timex *host_tx,
7393                                             abi_long target_addr)
7394 {
7395     struct target_timex *target_tx;
7396 
7397     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7398         return -TARGET_EFAULT;
7399     }
7400 
7401     __get_user(host_tx->modes, &target_tx->modes);
7402     __get_user(host_tx->offset, &target_tx->offset);
7403     __get_user(host_tx->freq, &target_tx->freq);
7404     __get_user(host_tx->maxerror, &target_tx->maxerror);
7405     __get_user(host_tx->esterror, &target_tx->esterror);
7406     __get_user(host_tx->status, &target_tx->status);
7407     __get_user(host_tx->constant, &target_tx->constant);
7408     __get_user(host_tx->precision, &target_tx->precision);
7409     __get_user(host_tx->tolerance, &target_tx->tolerance);
7410     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7411     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7412     __get_user(host_tx->tick, &target_tx->tick);
7413     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7414     __get_user(host_tx->jitter, &target_tx->jitter);
7415     __get_user(host_tx->shift, &target_tx->shift);
7416     __get_user(host_tx->stabil, &target_tx->stabil);
7417     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7418     __get_user(host_tx->calcnt, &target_tx->calcnt);
7419     __get_user(host_tx->errcnt, &target_tx->errcnt);
7420     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7421     __get_user(host_tx->tai, &target_tx->tai);
7422 
7423     unlock_user_struct(target_tx, target_addr, 0);
7424     return 0;
7425 }
7426 
7427 static inline abi_long host_to_target_timex(abi_long target_addr,
7428                                             struct timex *host_tx)
7429 {
7430     struct target_timex *target_tx;
7431 
7432     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7433         return -TARGET_EFAULT;
7434     }
7435 
7436     __put_user(host_tx->modes, &target_tx->modes);
7437     __put_user(host_tx->offset, &target_tx->offset);
7438     __put_user(host_tx->freq, &target_tx->freq);
7439     __put_user(host_tx->maxerror, &target_tx->maxerror);
7440     __put_user(host_tx->esterror, &target_tx->esterror);
7441     __put_user(host_tx->status, &target_tx->status);
7442     __put_user(host_tx->constant, &target_tx->constant);
7443     __put_user(host_tx->precision, &target_tx->precision);
7444     __put_user(host_tx->tolerance, &target_tx->tolerance);
7445     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7446     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7447     __put_user(host_tx->tick, &target_tx->tick);
7448     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7449     __put_user(host_tx->jitter, &target_tx->jitter);
7450     __put_user(host_tx->shift, &target_tx->shift);
7451     __put_user(host_tx->stabil, &target_tx->stabil);
7452     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7453     __put_user(host_tx->calcnt, &target_tx->calcnt);
7454     __put_user(host_tx->errcnt, &target_tx->errcnt);
7455     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7456     __put_user(host_tx->tai, &target_tx->tai);
7457 
7458     unlock_user_struct(target_tx, target_addr, 1);
7459     return 0;
7460 }
7461 #endif
7462 
7463 
7464 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7465 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7466                                               abi_long target_addr)
7467 {
7468     struct target__kernel_timex *target_tx;
7469 
7470     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7471                                  offsetof(struct target__kernel_timex,
7472                                           time))) {
7473         return -TARGET_EFAULT;
7474     }
7475 
7476     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7477         return -TARGET_EFAULT;
7478     }
7479 
7480     __get_user(host_tx->modes, &target_tx->modes);
7481     __get_user(host_tx->offset, &target_tx->offset);
7482     __get_user(host_tx->freq, &target_tx->freq);
7483     __get_user(host_tx->maxerror, &target_tx->maxerror);
7484     __get_user(host_tx->esterror, &target_tx->esterror);
7485     __get_user(host_tx->status, &target_tx->status);
7486     __get_user(host_tx->constant, &target_tx->constant);
7487     __get_user(host_tx->precision, &target_tx->precision);
7488     __get_user(host_tx->tolerance, &target_tx->tolerance);
7489     __get_user(host_tx->tick, &target_tx->tick);
7490     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7491     __get_user(host_tx->jitter, &target_tx->jitter);
7492     __get_user(host_tx->shift, &target_tx->shift);
7493     __get_user(host_tx->stabil, &target_tx->stabil);
7494     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7495     __get_user(host_tx->calcnt, &target_tx->calcnt);
7496     __get_user(host_tx->errcnt, &target_tx->errcnt);
7497     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7498     __get_user(host_tx->tai, &target_tx->tai);
7499 
7500     unlock_user_struct(target_tx, target_addr, 0);
7501     return 0;
7502 }
7503 
7504 static inline abi_long host_to_target_timex64(abi_long target_addr,
7505                                               struct timex *host_tx)
7506 {
7507     struct target__kernel_timex *target_tx;
7508 
7509    if (copy_to_user_timeval64(target_addr +
7510                               offsetof(struct target__kernel_timex, time),
7511                               &host_tx->time)) {
7512         return -TARGET_EFAULT;
7513     }
7514 
7515     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7516         return -TARGET_EFAULT;
7517     }
7518 
7519     __put_user(host_tx->modes, &target_tx->modes);
7520     __put_user(host_tx->offset, &target_tx->offset);
7521     __put_user(host_tx->freq, &target_tx->freq);
7522     __put_user(host_tx->maxerror, &target_tx->maxerror);
7523     __put_user(host_tx->esterror, &target_tx->esterror);
7524     __put_user(host_tx->status, &target_tx->status);
7525     __put_user(host_tx->constant, &target_tx->constant);
7526     __put_user(host_tx->precision, &target_tx->precision);
7527     __put_user(host_tx->tolerance, &target_tx->tolerance);
7528     __put_user(host_tx->tick, &target_tx->tick);
7529     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7530     __put_user(host_tx->jitter, &target_tx->jitter);
7531     __put_user(host_tx->shift, &target_tx->shift);
7532     __put_user(host_tx->stabil, &target_tx->stabil);
7533     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7534     __put_user(host_tx->calcnt, &target_tx->calcnt);
7535     __put_user(host_tx->errcnt, &target_tx->errcnt);
7536     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7537     __put_user(host_tx->tai, &target_tx->tai);
7538 
7539     unlock_user_struct(target_tx, target_addr, 1);
7540     return 0;
7541 }
7542 #endif
7543 
7544 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7545 #define sigev_notify_thread_id _sigev_un._tid
7546 #endif
7547 
7548 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7549                                                abi_ulong target_addr)
7550 {
7551     struct target_sigevent *target_sevp;
7552 
7553     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7554         return -TARGET_EFAULT;
7555     }
7556 
7557     /* This union is awkward on 64 bit systems because it has a 32 bit
7558      * integer and a pointer in it; we follow the conversion approach
7559      * used for handling sigval types in signal.c so the guest should get
7560      * the correct value back even if we did a 64 bit byteswap and it's
7561      * using the 32 bit integer.
7562      */
7563     host_sevp->sigev_value.sival_ptr =
7564         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7565     host_sevp->sigev_signo =
7566         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7567     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7568     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7569 
7570     unlock_user_struct(target_sevp, target_addr, 1);
7571     return 0;
7572 }
7573 
7574 #if defined(TARGET_NR_mlockall)
7575 static inline int target_to_host_mlockall_arg(int arg)
7576 {
7577     int result = 0;
7578 
7579     if (arg & TARGET_MCL_CURRENT) {
7580         result |= MCL_CURRENT;
7581     }
7582     if (arg & TARGET_MCL_FUTURE) {
7583         result |= MCL_FUTURE;
7584     }
7585 #ifdef MCL_ONFAULT
7586     if (arg & TARGET_MCL_ONFAULT) {
7587         result |= MCL_ONFAULT;
7588     }
7589 #endif
7590 
7591     return result;
7592 }
7593 #endif
7594 
7595 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7596      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7597      defined(TARGET_NR_newfstatat))
7598 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7599                                              abi_ulong target_addr,
7600                                              struct stat *host_st)
7601 {
7602 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7603     if (cpu_env->eabi) {
7604         struct target_eabi_stat64 *target_st;
7605 
7606         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7607             return -TARGET_EFAULT;
7608         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7609         __put_user(host_st->st_dev, &target_st->st_dev);
7610         __put_user(host_st->st_ino, &target_st->st_ino);
7611 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7612         __put_user(host_st->st_ino, &target_st->__st_ino);
7613 #endif
7614         __put_user(host_st->st_mode, &target_st->st_mode);
7615         __put_user(host_st->st_nlink, &target_st->st_nlink);
7616         __put_user(host_st->st_uid, &target_st->st_uid);
7617         __put_user(host_st->st_gid, &target_st->st_gid);
7618         __put_user(host_st->st_rdev, &target_st->st_rdev);
7619         __put_user(host_st->st_size, &target_st->st_size);
7620         __put_user(host_st->st_blksize, &target_st->st_blksize);
7621         __put_user(host_st->st_blocks, &target_st->st_blocks);
7622         __put_user(host_st->st_atime, &target_st->target_st_atime);
7623         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7624         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7625 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7626         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7627         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7628         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7629 #endif
7630         unlock_user_struct(target_st, target_addr, 1);
7631     } else
7632 #endif
7633     {
7634 #if defined(TARGET_HAS_STRUCT_STAT64)
7635         struct target_stat64 *target_st;
7636 #else
7637         struct target_stat *target_st;
7638 #endif
7639 
7640         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7641             return -TARGET_EFAULT;
7642         memset(target_st, 0, sizeof(*target_st));
7643         __put_user(host_st->st_dev, &target_st->st_dev);
7644         __put_user(host_st->st_ino, &target_st->st_ino);
7645 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7646         __put_user(host_st->st_ino, &target_st->__st_ino);
7647 #endif
7648         __put_user(host_st->st_mode, &target_st->st_mode);
7649         __put_user(host_st->st_nlink, &target_st->st_nlink);
7650         __put_user(host_st->st_uid, &target_st->st_uid);
7651         __put_user(host_st->st_gid, &target_st->st_gid);
7652         __put_user(host_st->st_rdev, &target_st->st_rdev);
7653         /* XXX: better use of kernel struct */
7654         __put_user(host_st->st_size, &target_st->st_size);
7655         __put_user(host_st->st_blksize, &target_st->st_blksize);
7656         __put_user(host_st->st_blocks, &target_st->st_blocks);
7657         __put_user(host_st->st_atime, &target_st->target_st_atime);
7658         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7659         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7660 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7661         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7662         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7663         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7664 #endif
7665         unlock_user_struct(target_st, target_addr, 1);
7666     }
7667 
7668     return 0;
7669 }
7670 #endif
7671 
7672 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7673 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7674                                             abi_ulong target_addr)
7675 {
7676     struct target_statx *target_stx;
7677 
7678     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7679         return -TARGET_EFAULT;
7680     }
7681     memset(target_stx, 0, sizeof(*target_stx));
7682 
7683     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7684     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7685     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7686     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7687     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7688     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7689     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7690     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7691     __put_user(host_stx->stx_size, &target_stx->stx_size);
7692     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7693     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7694     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7695     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7696     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7697     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7698     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7699     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7700     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7701     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7702     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7703     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7704     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7705     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7706 
7707     unlock_user_struct(target_stx, target_addr, 1);
7708 
7709     return 0;
7710 }
7711 #endif
7712 
7713 static int do_sys_futex(int *uaddr, int op, int val,
7714                          const struct timespec *timeout, int *uaddr2,
7715                          int val3)
7716 {
7717 #if HOST_LONG_BITS == 64
7718 #if defined(__NR_futex)
7719     /* always a 64-bit time_t, it doesn't define _time64 version  */
7720     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7721 
7722 #endif
7723 #else /* HOST_LONG_BITS == 64 */
7724 #if defined(__NR_futex_time64)
7725     if (sizeof(timeout->tv_sec) == 8) {
7726         /* _time64 function on 32bit arch */
7727         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7728     }
7729 #endif
7730 #if defined(__NR_futex)
7731     /* old function on 32bit arch */
7732     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7733 #endif
7734 #endif /* HOST_LONG_BITS == 64 */
7735     g_assert_not_reached();
7736 }
7737 
7738 static int do_safe_futex(int *uaddr, int op, int val,
7739                          const struct timespec *timeout, int *uaddr2,
7740                          int val3)
7741 {
7742 #if HOST_LONG_BITS == 64
7743 #if defined(__NR_futex)
7744     /* always a 64-bit time_t, it doesn't define _time64 version  */
7745     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7746 #endif
7747 #else /* HOST_LONG_BITS == 64 */
7748 #if defined(__NR_futex_time64)
7749     if (sizeof(timeout->tv_sec) == 8) {
7750         /* _time64 function on 32bit arch */
7751         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7752                                            val3));
7753     }
7754 #endif
7755 #if defined(__NR_futex)
7756     /* old function on 32bit arch */
7757     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7758 #endif
7759 #endif /* HOST_LONG_BITS == 64 */
7760     return -TARGET_ENOSYS;
7761 }
7762 
7763 /* ??? Using host futex calls even when target atomic operations
7764    are not really atomic probably breaks things.  However implementing
7765    futexes locally would make futexes shared between multiple processes
7766    tricky.  However they're probably useless because guest atomic
7767    operations won't work either.  */
7768 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7769 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7770                     int op, int val, target_ulong timeout,
7771                     target_ulong uaddr2, int val3)
7772 {
7773     struct timespec ts, *pts = NULL;
7774     void *haddr2 = NULL;
7775     int base_op;
7776 
7777     /* We assume FUTEX_* constants are the same on both host and target. */
7778 #ifdef FUTEX_CMD_MASK
7779     base_op = op & FUTEX_CMD_MASK;
7780 #else
7781     base_op = op;
7782 #endif
7783     switch (base_op) {
7784     case FUTEX_WAIT:
7785     case FUTEX_WAIT_BITSET:
7786         val = tswap32(val);
7787         break;
7788     case FUTEX_WAIT_REQUEUE_PI:
7789         val = tswap32(val);
7790         haddr2 = g2h(cpu, uaddr2);
7791         break;
7792     case FUTEX_LOCK_PI:
7793     case FUTEX_LOCK_PI2:
7794         break;
7795     case FUTEX_WAKE:
7796     case FUTEX_WAKE_BITSET:
7797     case FUTEX_TRYLOCK_PI:
7798     case FUTEX_UNLOCK_PI:
7799         timeout = 0;
7800         break;
7801     case FUTEX_FD:
7802         val = target_to_host_signal(val);
7803         timeout = 0;
7804         break;
7805     case FUTEX_CMP_REQUEUE:
7806     case FUTEX_CMP_REQUEUE_PI:
7807         val3 = tswap32(val3);
7808         /* fall through */
7809     case FUTEX_REQUEUE:
7810     case FUTEX_WAKE_OP:
7811         /*
7812          * For these, the 4th argument is not TIMEOUT, but VAL2.
7813          * But the prototype of do_safe_futex takes a pointer, so
7814          * insert casts to satisfy the compiler.  We do not need
7815          * to tswap VAL2 since it's not compared to guest memory.
7816           */
7817         pts = (struct timespec *)(uintptr_t)timeout;
7818         timeout = 0;
7819         haddr2 = g2h(cpu, uaddr2);
7820         break;
7821     default:
7822         return -TARGET_ENOSYS;
7823     }
7824     if (timeout) {
7825         pts = &ts;
7826         if (time64
7827             ? target_to_host_timespec64(pts, timeout)
7828             : target_to_host_timespec(pts, timeout)) {
7829             return -TARGET_EFAULT;
7830         }
7831     }
7832     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7833 }
7834 #endif
7835 
7836 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7837 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7838                                      abi_long handle, abi_long mount_id,
7839                                      abi_long flags)
7840 {
7841     struct file_handle *target_fh;
7842     struct file_handle *fh;
7843     int mid = 0;
7844     abi_long ret;
7845     char *name;
7846     unsigned int size, total_size;
7847 
7848     if (get_user_s32(size, handle)) {
7849         return -TARGET_EFAULT;
7850     }
7851 
7852     name = lock_user_string(pathname);
7853     if (!name) {
7854         return -TARGET_EFAULT;
7855     }
7856 
7857     total_size = sizeof(struct file_handle) + size;
7858     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7859     if (!target_fh) {
7860         unlock_user(name, pathname, 0);
7861         return -TARGET_EFAULT;
7862     }
7863 
7864     fh = g_malloc0(total_size);
7865     fh->handle_bytes = size;
7866 
7867     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7868     unlock_user(name, pathname, 0);
7869 
7870     /* man name_to_handle_at(2):
7871      * Other than the use of the handle_bytes field, the caller should treat
7872      * the file_handle structure as an opaque data type
7873      */
7874 
7875     memcpy(target_fh, fh, total_size);
7876     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7877     target_fh->handle_type = tswap32(fh->handle_type);
7878     g_free(fh);
7879     unlock_user(target_fh, handle, total_size);
7880 
7881     if (put_user_s32(mid, mount_id)) {
7882         return -TARGET_EFAULT;
7883     }
7884 
7885     return ret;
7886 
7887 }
7888 #endif
7889 
7890 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7891 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7892                                      abi_long flags)
7893 {
7894     struct file_handle *target_fh;
7895     struct file_handle *fh;
7896     unsigned int size, total_size;
7897     abi_long ret;
7898 
7899     if (get_user_s32(size, handle)) {
7900         return -TARGET_EFAULT;
7901     }
7902 
7903     total_size = sizeof(struct file_handle) + size;
7904     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7905     if (!target_fh) {
7906         return -TARGET_EFAULT;
7907     }
7908 
7909     fh = g_memdup(target_fh, total_size);
7910     fh->handle_bytes = size;
7911     fh->handle_type = tswap32(target_fh->handle_type);
7912 
7913     ret = get_errno(open_by_handle_at(mount_fd, fh,
7914                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7915 
7916     g_free(fh);
7917 
7918     unlock_user(target_fh, handle, total_size);
7919 
7920     return ret;
7921 }
7922 #endif
7923 
7924 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7925 
7926 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7927 {
7928     int host_flags;
7929     target_sigset_t *target_mask;
7930     sigset_t host_mask;
7931     abi_long ret;
7932 
7933     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7934         return -TARGET_EINVAL;
7935     }
7936     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7937         return -TARGET_EFAULT;
7938     }
7939 
7940     target_to_host_sigset(&host_mask, target_mask);
7941 
7942     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7943 
7944     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7945     if (ret >= 0) {
7946         fd_trans_register(ret, &target_signalfd_trans);
7947     }
7948 
7949     unlock_user_struct(target_mask, mask, 0);
7950 
7951     return ret;
7952 }
7953 #endif
7954 
7955 /* Map host to target signal numbers for the wait family of syscalls.
7956    Assume all other status bits are the same.  */
7957 int host_to_target_waitstatus(int status)
7958 {
7959     if (WIFSIGNALED(status)) {
7960         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7961     }
7962     if (WIFSTOPPED(status)) {
7963         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7964                | (status & 0xff);
7965     }
7966     return status;
7967 }
7968 
7969 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7970 {
7971     CPUState *cpu = env_cpu(cpu_env);
7972     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7973     int i;
7974 
7975     for (i = 0; i < bprm->argc; i++) {
7976         size_t len = strlen(bprm->argv[i]) + 1;
7977 
7978         if (write(fd, bprm->argv[i], len) != len) {
7979             return -1;
7980         }
7981     }
7982 
7983     return 0;
7984 }
7985 
7986 static int open_self_maps(CPUArchState *cpu_env, int fd)
7987 {
7988     CPUState *cpu = env_cpu(cpu_env);
7989     TaskState *ts = cpu->opaque;
7990     GSList *map_info = read_self_maps();
7991     GSList *s;
7992     int count;
7993 
7994     for (s = map_info; s; s = g_slist_next(s)) {
7995         MapInfo *e = (MapInfo *) s->data;
7996 
7997         if (h2g_valid(e->start)) {
7998             unsigned long min = e->start;
7999             unsigned long max = e->end;
8000             int flags = page_get_flags(h2g(min));
8001             const char *path;
8002 
8003             max = h2g_valid(max - 1) ?
8004                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8005 
8006             if (page_check_range(h2g(min), max - min, flags) == -1) {
8007                 continue;
8008             }
8009 
8010 #ifdef TARGET_HPPA
8011             if (h2g(max) == ts->info->stack_limit) {
8012 #else
8013             if (h2g(min) == ts->info->stack_limit) {
8014 #endif
8015                 path = "[stack]";
8016             } else {
8017                 path = e->path;
8018             }
8019 
8020             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8021                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8022                             h2g(min), h2g(max - 1) + 1,
8023                             (flags & PAGE_READ) ? 'r' : '-',
8024                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8025                             (flags & PAGE_EXEC) ? 'x' : '-',
8026                             e->is_priv ? 'p' : 's',
8027                             (uint64_t) e->offset, e->dev, e->inode);
8028             if (path) {
8029                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8030             } else {
8031                 dprintf(fd, "\n");
8032             }
8033         }
8034     }
8035 
8036     free_self_maps(map_info);
8037 
8038 #ifdef TARGET_VSYSCALL_PAGE
8039     /*
8040      * We only support execution from the vsyscall page.
8041      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8042      */
8043     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8044                     " --xp 00000000 00:00 0",
8045                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8046     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8047 #endif
8048 
8049     return 0;
8050 }
8051 
8052 static int open_self_stat(CPUArchState *cpu_env, int fd)
8053 {
8054     CPUState *cpu = env_cpu(cpu_env);
8055     TaskState *ts = cpu->opaque;
8056     g_autoptr(GString) buf = g_string_new(NULL);
8057     int i;
8058 
8059     for (i = 0; i < 44; i++) {
8060         if (i == 0) {
8061             /* pid */
8062             g_string_printf(buf, FMT_pid " ", getpid());
8063         } else if (i == 1) {
8064             /* app name */
8065             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8066             bin = bin ? bin + 1 : ts->bprm->argv[0];
8067             g_string_printf(buf, "(%.15s) ", bin);
8068         } else if (i == 3) {
8069             /* ppid */
8070             g_string_printf(buf, FMT_pid " ", getppid());
8071         } else if (i == 21) {
8072             /* starttime */
8073             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8074         } else if (i == 27) {
8075             /* stack bottom */
8076             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8077         } else {
8078             /* for the rest, there is MasterCard */
8079             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8080         }
8081 
8082         if (write(fd, buf->str, buf->len) != buf->len) {
8083             return -1;
8084         }
8085     }
8086 
8087     return 0;
8088 }
8089 
8090 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8091 {
8092     CPUState *cpu = env_cpu(cpu_env);
8093     TaskState *ts = cpu->opaque;
8094     abi_ulong auxv = ts->info->saved_auxv;
8095     abi_ulong len = ts->info->auxv_len;
8096     char *ptr;
8097 
8098     /*
8099      * Auxiliary vector is stored in target process stack.
8100      * read in whole auxv vector and copy it to file
8101      */
8102     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8103     if (ptr != NULL) {
8104         while (len > 0) {
8105             ssize_t r;
8106             r = write(fd, ptr, len);
8107             if (r <= 0) {
8108                 break;
8109             }
8110             len -= r;
8111             ptr += r;
8112         }
8113         lseek(fd, 0, SEEK_SET);
8114         unlock_user(ptr, auxv, len);
8115     }
8116 
8117     return 0;
8118 }
8119 
8120 static int is_proc_myself(const char *filename, const char *entry)
8121 {
8122     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8123         filename += strlen("/proc/");
8124         if (!strncmp(filename, "self/", strlen("self/"))) {
8125             filename += strlen("self/");
8126         } else if (*filename >= '1' && *filename <= '9') {
8127             char myself[80];
8128             snprintf(myself, sizeof(myself), "%d/", getpid());
8129             if (!strncmp(filename, myself, strlen(myself))) {
8130                 filename += strlen(myself);
8131             } else {
8132                 return 0;
8133             }
8134         } else {
8135             return 0;
8136         }
8137         if (!strcmp(filename, entry)) {
8138             return 1;
8139         }
8140     }
8141     return 0;
8142 }
8143 
8144 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8145                       const char *fmt, int code)
8146 {
8147     if (logfile) {
8148         CPUState *cs = env_cpu(env);
8149 
8150         fprintf(logfile, fmt, code);
8151         fprintf(logfile, "Failing executable: %s\n", exec_path);
8152         cpu_dump_state(cs, logfile, 0);
8153         open_self_maps(env, fileno(logfile));
8154     }
8155 }
8156 
8157 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8158 {
8159     /* dump to console */
8160     excp_dump_file(stderr, env, fmt, code);
8161 
8162     /* dump to log file */
8163     if (qemu_log_separate()) {
8164         FILE *logfile = qemu_log_trylock();
8165 
8166         excp_dump_file(logfile, env, fmt, code);
8167         qemu_log_unlock(logfile);
8168     }
8169 }
8170 
8171 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8172     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8173 static int is_proc(const char *filename, const char *entry)
8174 {
8175     return strcmp(filename, entry) == 0;
8176 }
8177 #endif
8178 
8179 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8180 static int open_net_route(CPUArchState *cpu_env, int fd)
8181 {
8182     FILE *fp;
8183     char *line = NULL;
8184     size_t len = 0;
8185     ssize_t read;
8186 
8187     fp = fopen("/proc/net/route", "r");
8188     if (fp == NULL) {
8189         return -1;
8190     }
8191 
8192     /* read header */
8193 
8194     read = getline(&line, &len, fp);
8195     dprintf(fd, "%s", line);
8196 
8197     /* read routes */
8198 
8199     while ((read = getline(&line, &len, fp)) != -1) {
8200         char iface[16];
8201         uint32_t dest, gw, mask;
8202         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8203         int fields;
8204 
8205         fields = sscanf(line,
8206                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8207                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8208                         &mask, &mtu, &window, &irtt);
8209         if (fields != 11) {
8210             continue;
8211         }
8212         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8213                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8214                 metric, tswap32(mask), mtu, window, irtt);
8215     }
8216 
8217     free(line);
8218     fclose(fp);
8219 
8220     return 0;
8221 }
8222 #endif
8223 
8224 #if defined(TARGET_SPARC)
8225 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8226 {
8227     dprintf(fd, "type\t\t: sun4u\n");
8228     return 0;
8229 }
8230 #endif
8231 
8232 #if defined(TARGET_HPPA)
8233 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8234 {
8235     int i, num_cpus;
8236 
8237     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8238     for (i = 0; i < num_cpus; i++) {
8239         dprintf(fd, "processor\t: %d\n", i);
8240         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8241         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8242         dprintf(fd, "capabilities\t: os32\n");
8243         dprintf(fd, "model\t\t: 9000/778/B160L - "
8244                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8245     }
8246     return 0;
8247 }
8248 #endif
8249 
8250 #if defined(TARGET_M68K)
8251 static int open_hardware(CPUArchState *cpu_env, int fd)
8252 {
8253     dprintf(fd, "Model:\t\tqemu-m68k\n");
8254     return 0;
8255 }
8256 #endif
8257 
8258 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8259 {
8260     struct fake_open {
8261         const char *filename;
8262         int (*fill)(CPUArchState *cpu_env, int fd);
8263         int (*cmp)(const char *s1, const char *s2);
8264     };
8265     const struct fake_open *fake_open;
8266     static const struct fake_open fakes[] = {
8267         { "maps", open_self_maps, is_proc_myself },
8268         { "stat", open_self_stat, is_proc_myself },
8269         { "auxv", open_self_auxv, is_proc_myself },
8270         { "cmdline", open_self_cmdline, is_proc_myself },
8271 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8272         { "/proc/net/route", open_net_route, is_proc },
8273 #endif
8274 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8275         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8276 #endif
8277 #if defined(TARGET_M68K)
8278         { "/proc/hardware", open_hardware, is_proc },
8279 #endif
8280         { NULL, NULL, NULL }
8281     };
8282 
8283     if (is_proc_myself(pathname, "exe")) {
8284         return safe_openat(dirfd, exec_path, flags, mode);
8285     }
8286 
8287     for (fake_open = fakes; fake_open->filename; fake_open++) {
8288         if (fake_open->cmp(pathname, fake_open->filename)) {
8289             break;
8290         }
8291     }
8292 
8293     if (fake_open->filename) {
8294         const char *tmpdir;
8295         char filename[PATH_MAX];
8296         int fd, r;
8297 
8298         fd = memfd_create("qemu-open", 0);
8299         if (fd < 0) {
8300             if (errno != ENOSYS) {
8301                 return fd;
8302             }
8303             /* create temporary file to map stat to */
8304             tmpdir = getenv("TMPDIR");
8305             if (!tmpdir)
8306                 tmpdir = "/tmp";
8307             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8308             fd = mkstemp(filename);
8309             if (fd < 0) {
8310                 return fd;
8311             }
8312             unlink(filename);
8313         }
8314 
8315         if ((r = fake_open->fill(cpu_env, fd))) {
8316             int e = errno;
8317             close(fd);
8318             errno = e;
8319             return r;
8320         }
8321         lseek(fd, 0, SEEK_SET);
8322 
8323         return fd;
8324     }
8325 
8326     return safe_openat(dirfd, path(pathname), flags, mode);
8327 }
8328 
8329 static int do_execveat(CPUArchState *cpu_env, int dirfd,
8330                        abi_long pathname, abi_long guest_argp,
8331                        abi_long guest_envp, int flags)
8332 {
8333     int ret;
8334     char **argp, **envp;
8335     int argc, envc;
8336     abi_ulong gp;
8337     abi_ulong addr;
8338     char **q;
8339     void *p;
8340 
8341     argc = 0;
8342 
8343     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8344         if (get_user_ual(addr, gp)) {
8345             return -TARGET_EFAULT;
8346         }
8347         if (!addr) {
8348             break;
8349         }
8350         argc++;
8351     }
8352     envc = 0;
8353     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8354         if (get_user_ual(addr, gp)) {
8355             return -TARGET_EFAULT;
8356         }
8357         if (!addr) {
8358             break;
8359         }
8360         envc++;
8361     }
8362 
8363     argp = g_new0(char *, argc + 1);
8364     envp = g_new0(char *, envc + 1);
8365 
8366     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8367         if (get_user_ual(addr, gp)) {
8368             goto execve_efault;
8369         }
8370         if (!addr) {
8371             break;
8372         }
8373         *q = lock_user_string(addr);
8374         if (!*q) {
8375             goto execve_efault;
8376         }
8377     }
8378     *q = NULL;
8379 
8380     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8381         if (get_user_ual(addr, gp)) {
8382             goto execve_efault;
8383         }
8384         if (!addr) {
8385             break;
8386         }
8387         *q = lock_user_string(addr);
8388         if (!*q) {
8389             goto execve_efault;
8390         }
8391     }
8392     *q = NULL;
8393 
8394     /*
8395      * Although execve() is not an interruptible syscall it is
8396      * a special case where we must use the safe_syscall wrapper:
8397      * if we allow a signal to happen before we make the host
8398      * syscall then we will 'lose' it, because at the point of
8399      * execve the process leaves QEMU's control. So we use the
8400      * safe syscall wrapper to ensure that we either take the
8401      * signal as a guest signal, or else it does not happen
8402      * before the execve completes and makes it the other
8403      * program's problem.
8404      */
8405     p = lock_user_string(pathname);
8406     if (!p) {
8407         goto execve_efault;
8408     }
8409 
8410     if (is_proc_myself(p, "exe")) {
8411         ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
8412     } else {
8413         ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
8414     }
8415 
8416     unlock_user(p, pathname, 0);
8417 
8418     goto execve_end;
8419 
8420 execve_efault:
8421     ret = -TARGET_EFAULT;
8422 
8423 execve_end:
8424     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8425         if (get_user_ual(addr, gp) || !addr) {
8426             break;
8427         }
8428         unlock_user(*q, addr, 0);
8429     }
8430     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8431         if (get_user_ual(addr, gp) || !addr) {
8432             break;
8433         }
8434         unlock_user(*q, addr, 0);
8435     }
8436 
8437     g_free(argp);
8438     g_free(envp);
8439     return ret;
8440 }
8441 
8442 #define TIMER_MAGIC 0x0caf0000
8443 #define TIMER_MAGIC_MASK 0xffff0000
8444 
8445 /* Convert QEMU provided timer ID back to internal 16bit index format */
8446 static target_timer_t get_timer_id(abi_long arg)
8447 {
8448     target_timer_t timerid = arg;
8449 
8450     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8451         return -TARGET_EINVAL;
8452     }
8453 
8454     timerid &= 0xffff;
8455 
8456     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8457         return -TARGET_EINVAL;
8458     }
8459 
8460     return timerid;
8461 }
8462 
8463 static int target_to_host_cpu_mask(unsigned long *host_mask,
8464                                    size_t host_size,
8465                                    abi_ulong target_addr,
8466                                    size_t target_size)
8467 {
8468     unsigned target_bits = sizeof(abi_ulong) * 8;
8469     unsigned host_bits = sizeof(*host_mask) * 8;
8470     abi_ulong *target_mask;
8471     unsigned i, j;
8472 
8473     assert(host_size >= target_size);
8474 
8475     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8476     if (!target_mask) {
8477         return -TARGET_EFAULT;
8478     }
8479     memset(host_mask, 0, host_size);
8480 
8481     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8482         unsigned bit = i * target_bits;
8483         abi_ulong val;
8484 
8485         __get_user(val, &target_mask[i]);
8486         for (j = 0; j < target_bits; j++, bit++) {
8487             if (val & (1UL << j)) {
8488                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8489             }
8490         }
8491     }
8492 
8493     unlock_user(target_mask, target_addr, 0);
8494     return 0;
8495 }
8496 
8497 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8498                                    size_t host_size,
8499                                    abi_ulong target_addr,
8500                                    size_t target_size)
8501 {
8502     unsigned target_bits = sizeof(abi_ulong) * 8;
8503     unsigned host_bits = sizeof(*host_mask) * 8;
8504     abi_ulong *target_mask;
8505     unsigned i, j;
8506 
8507     assert(host_size >= target_size);
8508 
8509     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8510     if (!target_mask) {
8511         return -TARGET_EFAULT;
8512     }
8513 
8514     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8515         unsigned bit = i * target_bits;
8516         abi_ulong val = 0;
8517 
8518         for (j = 0; j < target_bits; j++, bit++) {
8519             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8520                 val |= 1UL << j;
8521             }
8522         }
8523         __put_user(val, &target_mask[i]);
8524     }
8525 
8526     unlock_user(target_mask, target_addr, target_size);
8527     return 0;
8528 }
8529 
8530 #ifdef TARGET_NR_getdents
8531 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8532 {
8533     g_autofree void *hdirp = NULL;
8534     void *tdirp;
8535     int hlen, hoff, toff;
8536     int hreclen, treclen;
8537     off64_t prev_diroff = 0;
8538 
8539     hdirp = g_try_malloc(count);
8540     if (!hdirp) {
8541         return -TARGET_ENOMEM;
8542     }
8543 
8544 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8545     hlen = sys_getdents(dirfd, hdirp, count);
8546 #else
8547     hlen = sys_getdents64(dirfd, hdirp, count);
8548 #endif
8549 
8550     hlen = get_errno(hlen);
8551     if (is_error(hlen)) {
8552         return hlen;
8553     }
8554 
8555     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8556     if (!tdirp) {
8557         return -TARGET_EFAULT;
8558     }
8559 
8560     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8561 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8562         struct linux_dirent *hde = hdirp + hoff;
8563 #else
8564         struct linux_dirent64 *hde = hdirp + hoff;
8565 #endif
8566         struct target_dirent *tde = tdirp + toff;
8567         int namelen;
8568         uint8_t type;
8569 
8570         namelen = strlen(hde->d_name);
8571         hreclen = hde->d_reclen;
8572         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8573         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8574 
8575         if (toff + treclen > count) {
8576             /*
8577              * If the host struct is smaller than the target struct, or
8578              * requires less alignment and thus packs into less space,
8579              * then the host can return more entries than we can pass
8580              * on to the guest.
8581              */
8582             if (toff == 0) {
8583                 toff = -TARGET_EINVAL; /* result buffer is too small */
8584                 break;
8585             }
8586             /*
8587              * Return what we have, resetting the file pointer to the
8588              * location of the first record not returned.
8589              */
8590             lseek64(dirfd, prev_diroff, SEEK_SET);
8591             break;
8592         }
8593 
8594         prev_diroff = hde->d_off;
8595         tde->d_ino = tswapal(hde->d_ino);
8596         tde->d_off = tswapal(hde->d_off);
8597         tde->d_reclen = tswap16(treclen);
8598         memcpy(tde->d_name, hde->d_name, namelen + 1);
8599 
8600         /*
8601          * The getdents type is in what was formerly a padding byte at the
8602          * end of the structure.
8603          */
8604 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8605         type = *((uint8_t *)hde + hreclen - 1);
8606 #else
8607         type = hde->d_type;
8608 #endif
8609         *((uint8_t *)tde + treclen - 1) = type;
8610     }
8611 
8612     unlock_user(tdirp, arg2, toff);
8613     return toff;
8614 }
8615 #endif /* TARGET_NR_getdents */
8616 
8617 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8618 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8619 {
8620     g_autofree void *hdirp = NULL;
8621     void *tdirp;
8622     int hlen, hoff, toff;
8623     int hreclen, treclen;
8624     off64_t prev_diroff = 0;
8625 
8626     hdirp = g_try_malloc(count);
8627     if (!hdirp) {
8628         return -TARGET_ENOMEM;
8629     }
8630 
8631     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8632     if (is_error(hlen)) {
8633         return hlen;
8634     }
8635 
8636     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8637     if (!tdirp) {
8638         return -TARGET_EFAULT;
8639     }
8640 
8641     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8642         struct linux_dirent64 *hde = hdirp + hoff;
8643         struct target_dirent64 *tde = tdirp + toff;
8644         int namelen;
8645 
8646         namelen = strlen(hde->d_name) + 1;
8647         hreclen = hde->d_reclen;
8648         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8649         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8650 
8651         if (toff + treclen > count) {
8652             /*
8653              * If the host struct is smaller than the target struct, or
8654              * requires less alignment and thus packs into less space,
8655              * then the host can return more entries than we can pass
8656              * on to the guest.
8657              */
8658             if (toff == 0) {
8659                 toff = -TARGET_EINVAL; /* result buffer is too small */
8660                 break;
8661             }
8662             /*
8663              * Return what we have, resetting the file pointer to the
8664              * location of the first record not returned.
8665              */
8666             lseek64(dirfd, prev_diroff, SEEK_SET);
8667             break;
8668         }
8669 
8670         prev_diroff = hde->d_off;
8671         tde->d_ino = tswap64(hde->d_ino);
8672         tde->d_off = tswap64(hde->d_off);
8673         tde->d_reclen = tswap16(treclen);
8674         tde->d_type = hde->d_type;
8675         memcpy(tde->d_name, hde->d_name, namelen);
8676     }
8677 
8678     unlock_user(tdirp, arg2, toff);
8679     return toff;
8680 }
8681 #endif /* TARGET_NR_getdents64 */
8682 
8683 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8684 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8685 #endif
8686 
8687 /* This is an internal helper for do_syscall so that it is easier
8688  * to have a single return point, so that actions, such as logging
8689  * of syscall results, can be performed.
8690  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8691  */
8692 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8693                             abi_long arg2, abi_long arg3, abi_long arg4,
8694                             abi_long arg5, abi_long arg6, abi_long arg7,
8695                             abi_long arg8)
8696 {
8697     CPUState *cpu = env_cpu(cpu_env);
8698     abi_long ret;
8699 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8700     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8701     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8702     || defined(TARGET_NR_statx)
8703     struct stat st;
8704 #endif
8705 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8706     || defined(TARGET_NR_fstatfs)
8707     struct statfs stfs;
8708 #endif
8709     void *p;
8710 
8711     switch(num) {
8712     case TARGET_NR_exit:
8713         /* In old applications this may be used to implement _exit(2).
8714            However in threaded applications it is used for thread termination,
8715            and _exit_group is used for application termination.
8716            Do thread termination if we have more then one thread.  */
8717 
8718         if (block_signals()) {
8719             return -QEMU_ERESTARTSYS;
8720         }
8721 
8722         pthread_mutex_lock(&clone_lock);
8723 
8724         if (CPU_NEXT(first_cpu)) {
8725             TaskState *ts = cpu->opaque;
8726 
8727             if (ts->child_tidptr) {
8728                 put_user_u32(0, ts->child_tidptr);
8729                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8730                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8731             }
8732 
8733             object_unparent(OBJECT(cpu));
8734             object_unref(OBJECT(cpu));
8735             /*
8736              * At this point the CPU should be unrealized and removed
8737              * from cpu lists. We can clean-up the rest of the thread
8738              * data without the lock held.
8739              */
8740 
8741             pthread_mutex_unlock(&clone_lock);
8742 
8743             thread_cpu = NULL;
8744             g_free(ts);
8745             rcu_unregister_thread();
8746             pthread_exit(NULL);
8747         }
8748 
8749         pthread_mutex_unlock(&clone_lock);
8750         preexit_cleanup(cpu_env, arg1);
8751         _exit(arg1);
8752         return 0; /* avoid warning */
8753     case TARGET_NR_read:
8754         if (arg2 == 0 && arg3 == 0) {
8755             return get_errno(safe_read(arg1, 0, 0));
8756         } else {
8757             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8758                 return -TARGET_EFAULT;
8759             ret = get_errno(safe_read(arg1, p, arg3));
8760             if (ret >= 0 &&
8761                 fd_trans_host_to_target_data(arg1)) {
8762                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8763             }
8764             unlock_user(p, arg2, ret);
8765         }
8766         return ret;
8767     case TARGET_NR_write:
8768         if (arg2 == 0 && arg3 == 0) {
8769             return get_errno(safe_write(arg1, 0, 0));
8770         }
8771         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8772             return -TARGET_EFAULT;
8773         if (fd_trans_target_to_host_data(arg1)) {
8774             void *copy = g_malloc(arg3);
8775             memcpy(copy, p, arg3);
8776             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8777             if (ret >= 0) {
8778                 ret = get_errno(safe_write(arg1, copy, ret));
8779             }
8780             g_free(copy);
8781         } else {
8782             ret = get_errno(safe_write(arg1, p, arg3));
8783         }
8784         unlock_user(p, arg2, 0);
8785         return ret;
8786 
8787 #ifdef TARGET_NR_open
8788     case TARGET_NR_open:
8789         if (!(p = lock_user_string(arg1)))
8790             return -TARGET_EFAULT;
8791         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8792                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8793                                   arg3));
8794         fd_trans_unregister(ret);
8795         unlock_user(p, arg1, 0);
8796         return ret;
8797 #endif
8798     case TARGET_NR_openat:
8799         if (!(p = lock_user_string(arg2)))
8800             return -TARGET_EFAULT;
8801         ret = get_errno(do_openat(cpu_env, arg1, p,
8802                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8803                                   arg4));
8804         fd_trans_unregister(ret);
8805         unlock_user(p, arg2, 0);
8806         return ret;
8807 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8808     case TARGET_NR_name_to_handle_at:
8809         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8810         return ret;
8811 #endif
8812 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8813     case TARGET_NR_open_by_handle_at:
8814         ret = do_open_by_handle_at(arg1, arg2, arg3);
8815         fd_trans_unregister(ret);
8816         return ret;
8817 #endif
8818 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
8819     case TARGET_NR_pidfd_open:
8820         return get_errno(pidfd_open(arg1, arg2));
8821 #endif
8822 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
8823     case TARGET_NR_pidfd_send_signal:
8824         {
8825             siginfo_t uinfo, *puinfo;
8826 
8827             if (arg3) {
8828                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8829                 if (!p) {
8830                     return -TARGET_EFAULT;
8831                  }
8832                  target_to_host_siginfo(&uinfo, p);
8833                  unlock_user(p, arg3, 0);
8834                  puinfo = &uinfo;
8835             } else {
8836                  puinfo = NULL;
8837             }
8838             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
8839                                               puinfo, arg4));
8840         }
8841         return ret;
8842 #endif
8843 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
8844     case TARGET_NR_pidfd_getfd:
8845         return get_errno(pidfd_getfd(arg1, arg2, arg3));
8846 #endif
8847     case TARGET_NR_close:
8848         fd_trans_unregister(arg1);
8849         return get_errno(close(arg1));
8850 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
8851     case TARGET_NR_close_range:
8852         ret = get_errno(sys_close_range(arg1, arg2, arg3));
8853         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
8854             abi_long fd, maxfd;
8855             maxfd = MIN(arg2, target_fd_max);
8856             for (fd = arg1; fd < maxfd; fd++) {
8857                 fd_trans_unregister(fd);
8858             }
8859         }
8860         return ret;
8861 #endif
8862 
8863     case TARGET_NR_brk:
8864         return do_brk(arg1);
8865 #ifdef TARGET_NR_fork
8866     case TARGET_NR_fork:
8867         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8868 #endif
8869 #ifdef TARGET_NR_waitpid
8870     case TARGET_NR_waitpid:
8871         {
8872             int status;
8873             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8874             if (!is_error(ret) && arg2 && ret
8875                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8876                 return -TARGET_EFAULT;
8877         }
8878         return ret;
8879 #endif
8880 #ifdef TARGET_NR_waitid
8881     case TARGET_NR_waitid:
8882         {
8883             siginfo_t info;
8884             info.si_pid = 0;
8885             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8886             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8887                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8888                     return -TARGET_EFAULT;
8889                 host_to_target_siginfo(p, &info);
8890                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8891             }
8892         }
8893         return ret;
8894 #endif
8895 #ifdef TARGET_NR_creat /* not on alpha */
8896     case TARGET_NR_creat:
8897         if (!(p = lock_user_string(arg1)))
8898             return -TARGET_EFAULT;
8899         ret = get_errno(creat(p, arg2));
8900         fd_trans_unregister(ret);
8901         unlock_user(p, arg1, 0);
8902         return ret;
8903 #endif
8904 #ifdef TARGET_NR_link
8905     case TARGET_NR_link:
8906         {
8907             void * p2;
8908             p = lock_user_string(arg1);
8909             p2 = lock_user_string(arg2);
8910             if (!p || !p2)
8911                 ret = -TARGET_EFAULT;
8912             else
8913                 ret = get_errno(link(p, p2));
8914             unlock_user(p2, arg2, 0);
8915             unlock_user(p, arg1, 0);
8916         }
8917         return ret;
8918 #endif
8919 #if defined(TARGET_NR_linkat)
8920     case TARGET_NR_linkat:
8921         {
8922             void * p2 = NULL;
8923             if (!arg2 || !arg4)
8924                 return -TARGET_EFAULT;
8925             p  = lock_user_string(arg2);
8926             p2 = lock_user_string(arg4);
8927             if (!p || !p2)
8928                 ret = -TARGET_EFAULT;
8929             else
8930                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8931             unlock_user(p, arg2, 0);
8932             unlock_user(p2, arg4, 0);
8933         }
8934         return ret;
8935 #endif
8936 #ifdef TARGET_NR_unlink
8937     case TARGET_NR_unlink:
8938         if (!(p = lock_user_string(arg1)))
8939             return -TARGET_EFAULT;
8940         ret = get_errno(unlink(p));
8941         unlock_user(p, arg1, 0);
8942         return ret;
8943 #endif
8944 #if defined(TARGET_NR_unlinkat)
8945     case TARGET_NR_unlinkat:
8946         if (!(p = lock_user_string(arg2)))
8947             return -TARGET_EFAULT;
8948         ret = get_errno(unlinkat(arg1, p, arg3));
8949         unlock_user(p, arg2, 0);
8950         return ret;
8951 #endif
8952     case TARGET_NR_execveat:
8953         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
8954     case TARGET_NR_execve:
8955         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
8956     case TARGET_NR_chdir:
8957         if (!(p = lock_user_string(arg1)))
8958             return -TARGET_EFAULT;
8959         ret = get_errno(chdir(p));
8960         unlock_user(p, arg1, 0);
8961         return ret;
8962 #ifdef TARGET_NR_time
8963     case TARGET_NR_time:
8964         {
8965             time_t host_time;
8966             ret = get_errno(time(&host_time));
8967             if (!is_error(ret)
8968                 && arg1
8969                 && put_user_sal(host_time, arg1))
8970                 return -TARGET_EFAULT;
8971         }
8972         return ret;
8973 #endif
8974 #ifdef TARGET_NR_mknod
8975     case TARGET_NR_mknod:
8976         if (!(p = lock_user_string(arg1)))
8977             return -TARGET_EFAULT;
8978         ret = get_errno(mknod(p, arg2, arg3));
8979         unlock_user(p, arg1, 0);
8980         return ret;
8981 #endif
8982 #if defined(TARGET_NR_mknodat)
8983     case TARGET_NR_mknodat:
8984         if (!(p = lock_user_string(arg2)))
8985             return -TARGET_EFAULT;
8986         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8987         unlock_user(p, arg2, 0);
8988         return ret;
8989 #endif
8990 #ifdef TARGET_NR_chmod
8991     case TARGET_NR_chmod:
8992         if (!(p = lock_user_string(arg1)))
8993             return -TARGET_EFAULT;
8994         ret = get_errno(chmod(p, arg2));
8995         unlock_user(p, arg1, 0);
8996         return ret;
8997 #endif
8998 #ifdef TARGET_NR_lseek
8999     case TARGET_NR_lseek:
9000         return get_errno(lseek(arg1, arg2, arg3));
9001 #endif
9002 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9003     /* Alpha specific */
9004     case TARGET_NR_getxpid:
9005         cpu_env->ir[IR_A4] = getppid();
9006         return get_errno(getpid());
9007 #endif
9008 #ifdef TARGET_NR_getpid
9009     case TARGET_NR_getpid:
9010         return get_errno(getpid());
9011 #endif
9012     case TARGET_NR_mount:
9013         {
9014             /* need to look at the data field */
9015             void *p2, *p3;
9016 
9017             if (arg1) {
9018                 p = lock_user_string(arg1);
9019                 if (!p) {
9020                     return -TARGET_EFAULT;
9021                 }
9022             } else {
9023                 p = NULL;
9024             }
9025 
9026             p2 = lock_user_string(arg2);
9027             if (!p2) {
9028                 if (arg1) {
9029                     unlock_user(p, arg1, 0);
9030                 }
9031                 return -TARGET_EFAULT;
9032             }
9033 
9034             if (arg3) {
9035                 p3 = lock_user_string(arg3);
9036                 if (!p3) {
9037                     if (arg1) {
9038                         unlock_user(p, arg1, 0);
9039                     }
9040                     unlock_user(p2, arg2, 0);
9041                     return -TARGET_EFAULT;
9042                 }
9043             } else {
9044                 p3 = NULL;
9045             }
9046 
9047             /* FIXME - arg5 should be locked, but it isn't clear how to
9048              * do that since it's not guaranteed to be a NULL-terminated
9049              * string.
9050              */
9051             if (!arg5) {
9052                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9053             } else {
9054                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9055             }
9056             ret = get_errno(ret);
9057 
9058             if (arg1) {
9059                 unlock_user(p, arg1, 0);
9060             }
9061             unlock_user(p2, arg2, 0);
9062             if (arg3) {
9063                 unlock_user(p3, arg3, 0);
9064             }
9065         }
9066         return ret;
9067 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9068 #if defined(TARGET_NR_umount)
9069     case TARGET_NR_umount:
9070 #endif
9071 #if defined(TARGET_NR_oldumount)
9072     case TARGET_NR_oldumount:
9073 #endif
9074         if (!(p = lock_user_string(arg1)))
9075             return -TARGET_EFAULT;
9076         ret = get_errno(umount(p));
9077         unlock_user(p, arg1, 0);
9078         return ret;
9079 #endif
9080 #ifdef TARGET_NR_stime /* not on alpha */
9081     case TARGET_NR_stime:
9082         {
9083             struct timespec ts;
9084             ts.tv_nsec = 0;
9085             if (get_user_sal(ts.tv_sec, arg1)) {
9086                 return -TARGET_EFAULT;
9087             }
9088             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9089         }
9090 #endif
9091 #ifdef TARGET_NR_alarm /* not on alpha */
9092     case TARGET_NR_alarm:
9093         return alarm(arg1);
9094 #endif
9095 #ifdef TARGET_NR_pause /* not on alpha */
9096     case TARGET_NR_pause:
9097         if (!block_signals()) {
9098             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9099         }
9100         return -TARGET_EINTR;
9101 #endif
9102 #ifdef TARGET_NR_utime
9103     case TARGET_NR_utime:
9104         {
9105             struct utimbuf tbuf, *host_tbuf;
9106             struct target_utimbuf *target_tbuf;
9107             if (arg2) {
9108                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9109                     return -TARGET_EFAULT;
9110                 tbuf.actime = tswapal(target_tbuf->actime);
9111                 tbuf.modtime = tswapal(target_tbuf->modtime);
9112                 unlock_user_struct(target_tbuf, arg2, 0);
9113                 host_tbuf = &tbuf;
9114             } else {
9115                 host_tbuf = NULL;
9116             }
9117             if (!(p = lock_user_string(arg1)))
9118                 return -TARGET_EFAULT;
9119             ret = get_errno(utime(p, host_tbuf));
9120             unlock_user(p, arg1, 0);
9121         }
9122         return ret;
9123 #endif
9124 #ifdef TARGET_NR_utimes
9125     case TARGET_NR_utimes:
9126         {
9127             struct timeval *tvp, tv[2];
9128             if (arg2) {
9129                 if (copy_from_user_timeval(&tv[0], arg2)
9130                     || copy_from_user_timeval(&tv[1],
9131                                               arg2 + sizeof(struct target_timeval)))
9132                     return -TARGET_EFAULT;
9133                 tvp = tv;
9134             } else {
9135                 tvp = NULL;
9136             }
9137             if (!(p = lock_user_string(arg1)))
9138                 return -TARGET_EFAULT;
9139             ret = get_errno(utimes(p, tvp));
9140             unlock_user(p, arg1, 0);
9141         }
9142         return ret;
9143 #endif
9144 #if defined(TARGET_NR_futimesat)
9145     case TARGET_NR_futimesat:
9146         {
9147             struct timeval *tvp, tv[2];
9148             if (arg3) {
9149                 if (copy_from_user_timeval(&tv[0], arg3)
9150                     || copy_from_user_timeval(&tv[1],
9151                                               arg3 + sizeof(struct target_timeval)))
9152                     return -TARGET_EFAULT;
9153                 tvp = tv;
9154             } else {
9155                 tvp = NULL;
9156             }
9157             if (!(p = lock_user_string(arg2))) {
9158                 return -TARGET_EFAULT;
9159             }
9160             ret = get_errno(futimesat(arg1, path(p), tvp));
9161             unlock_user(p, arg2, 0);
9162         }
9163         return ret;
9164 #endif
9165 #ifdef TARGET_NR_access
9166     case TARGET_NR_access:
9167         if (!(p = lock_user_string(arg1))) {
9168             return -TARGET_EFAULT;
9169         }
9170         ret = get_errno(access(path(p), arg2));
9171         unlock_user(p, arg1, 0);
9172         return ret;
9173 #endif
9174 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9175     case TARGET_NR_faccessat:
9176         if (!(p = lock_user_string(arg2))) {
9177             return -TARGET_EFAULT;
9178         }
9179         ret = get_errno(faccessat(arg1, p, arg3, 0));
9180         unlock_user(p, arg2, 0);
9181         return ret;
9182 #endif
9183 #if defined(TARGET_NR_faccessat2)
9184     case TARGET_NR_faccessat2:
9185         if (!(p = lock_user_string(arg2))) {
9186             return -TARGET_EFAULT;
9187         }
9188         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9189         unlock_user(p, arg2, 0);
9190         return ret;
9191 #endif
9192 #ifdef TARGET_NR_nice /* not on alpha */
9193     case TARGET_NR_nice:
9194         return get_errno(nice(arg1));
9195 #endif
9196     case TARGET_NR_sync:
9197         sync();
9198         return 0;
9199 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9200     case TARGET_NR_syncfs:
9201         return get_errno(syncfs(arg1));
9202 #endif
9203     case TARGET_NR_kill:
9204         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9205 #ifdef TARGET_NR_rename
9206     case TARGET_NR_rename:
9207         {
9208             void *p2;
9209             p = lock_user_string(arg1);
9210             p2 = lock_user_string(arg2);
9211             if (!p || !p2)
9212                 ret = -TARGET_EFAULT;
9213             else
9214                 ret = get_errno(rename(p, p2));
9215             unlock_user(p2, arg2, 0);
9216             unlock_user(p, arg1, 0);
9217         }
9218         return ret;
9219 #endif
9220 #if defined(TARGET_NR_renameat)
9221     case TARGET_NR_renameat:
9222         {
9223             void *p2;
9224             p  = lock_user_string(arg2);
9225             p2 = lock_user_string(arg4);
9226             if (!p || !p2)
9227                 ret = -TARGET_EFAULT;
9228             else
9229                 ret = get_errno(renameat(arg1, p, arg3, p2));
9230             unlock_user(p2, arg4, 0);
9231             unlock_user(p, arg2, 0);
9232         }
9233         return ret;
9234 #endif
9235 #if defined(TARGET_NR_renameat2)
9236     case TARGET_NR_renameat2:
9237         {
9238             void *p2;
9239             p  = lock_user_string(arg2);
9240             p2 = lock_user_string(arg4);
9241             if (!p || !p2) {
9242                 ret = -TARGET_EFAULT;
9243             } else {
9244                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9245             }
9246             unlock_user(p2, arg4, 0);
9247             unlock_user(p, arg2, 0);
9248         }
9249         return ret;
9250 #endif
9251 #ifdef TARGET_NR_mkdir
9252     case TARGET_NR_mkdir:
9253         if (!(p = lock_user_string(arg1)))
9254             return -TARGET_EFAULT;
9255         ret = get_errno(mkdir(p, arg2));
9256         unlock_user(p, arg1, 0);
9257         return ret;
9258 #endif
9259 #if defined(TARGET_NR_mkdirat)
9260     case TARGET_NR_mkdirat:
9261         if (!(p = lock_user_string(arg2)))
9262             return -TARGET_EFAULT;
9263         ret = get_errno(mkdirat(arg1, p, arg3));
9264         unlock_user(p, arg2, 0);
9265         return ret;
9266 #endif
9267 #ifdef TARGET_NR_rmdir
9268     case TARGET_NR_rmdir:
9269         if (!(p = lock_user_string(arg1)))
9270             return -TARGET_EFAULT;
9271         ret = get_errno(rmdir(p));
9272         unlock_user(p, arg1, 0);
9273         return ret;
9274 #endif
9275     case TARGET_NR_dup:
9276         ret = get_errno(dup(arg1));
9277         if (ret >= 0) {
9278             fd_trans_dup(arg1, ret);
9279         }
9280         return ret;
9281 #ifdef TARGET_NR_pipe
9282     case TARGET_NR_pipe:
9283         return do_pipe(cpu_env, arg1, 0, 0);
9284 #endif
9285 #ifdef TARGET_NR_pipe2
9286     case TARGET_NR_pipe2:
9287         return do_pipe(cpu_env, arg1,
9288                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9289 #endif
9290     case TARGET_NR_times:
9291         {
9292             struct target_tms *tmsp;
9293             struct tms tms;
9294             ret = get_errno(times(&tms));
9295             if (arg1) {
9296                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9297                 if (!tmsp)
9298                     return -TARGET_EFAULT;
9299                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9300                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9301                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9302                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9303             }
9304             if (!is_error(ret))
9305                 ret = host_to_target_clock_t(ret);
9306         }
9307         return ret;
9308     case TARGET_NR_acct:
9309         if (arg1 == 0) {
9310             ret = get_errno(acct(NULL));
9311         } else {
9312             if (!(p = lock_user_string(arg1))) {
9313                 return -TARGET_EFAULT;
9314             }
9315             ret = get_errno(acct(path(p)));
9316             unlock_user(p, arg1, 0);
9317         }
9318         return ret;
9319 #ifdef TARGET_NR_umount2
9320     case TARGET_NR_umount2:
9321         if (!(p = lock_user_string(arg1)))
9322             return -TARGET_EFAULT;
9323         ret = get_errno(umount2(p, arg2));
9324         unlock_user(p, arg1, 0);
9325         return ret;
9326 #endif
9327     case TARGET_NR_ioctl:
9328         return do_ioctl(arg1, arg2, arg3);
9329 #ifdef TARGET_NR_fcntl
9330     case TARGET_NR_fcntl:
9331         return do_fcntl(arg1, arg2, arg3);
9332 #endif
9333     case TARGET_NR_setpgid:
9334         return get_errno(setpgid(arg1, arg2));
9335     case TARGET_NR_umask:
9336         return get_errno(umask(arg1));
9337     case TARGET_NR_chroot:
9338         if (!(p = lock_user_string(arg1)))
9339             return -TARGET_EFAULT;
9340         ret = get_errno(chroot(p));
9341         unlock_user(p, arg1, 0);
9342         return ret;
9343 #ifdef TARGET_NR_dup2
9344     case TARGET_NR_dup2:
9345         ret = get_errno(dup2(arg1, arg2));
9346         if (ret >= 0) {
9347             fd_trans_dup(arg1, arg2);
9348         }
9349         return ret;
9350 #endif
9351 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9352     case TARGET_NR_dup3:
9353     {
9354         int host_flags;
9355 
9356         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9357             return -EINVAL;
9358         }
9359         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9360         ret = get_errno(dup3(arg1, arg2, host_flags));
9361         if (ret >= 0) {
9362             fd_trans_dup(arg1, arg2);
9363         }
9364         return ret;
9365     }
9366 #endif
9367 #ifdef TARGET_NR_getppid /* not on alpha */
9368     case TARGET_NR_getppid:
9369         return get_errno(getppid());
9370 #endif
9371 #ifdef TARGET_NR_getpgrp
9372     case TARGET_NR_getpgrp:
9373         return get_errno(getpgrp());
9374 #endif
9375     case TARGET_NR_setsid:
9376         return get_errno(setsid());
9377 #ifdef TARGET_NR_sigaction
9378     case TARGET_NR_sigaction:
9379         {
9380 #if defined(TARGET_MIPS)
9381 	    struct target_sigaction act, oact, *pact, *old_act;
9382 
9383 	    if (arg2) {
9384                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9385                     return -TARGET_EFAULT;
9386 		act._sa_handler = old_act->_sa_handler;
9387 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9388 		act.sa_flags = old_act->sa_flags;
9389 		unlock_user_struct(old_act, arg2, 0);
9390 		pact = &act;
9391 	    } else {
9392 		pact = NULL;
9393 	    }
9394 
9395         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9396 
9397 	    if (!is_error(ret) && arg3) {
9398                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9399                     return -TARGET_EFAULT;
9400 		old_act->_sa_handler = oact._sa_handler;
9401 		old_act->sa_flags = oact.sa_flags;
9402 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9403 		old_act->sa_mask.sig[1] = 0;
9404 		old_act->sa_mask.sig[2] = 0;
9405 		old_act->sa_mask.sig[3] = 0;
9406 		unlock_user_struct(old_act, arg3, 1);
9407 	    }
9408 #else
9409             struct target_old_sigaction *old_act;
9410             struct target_sigaction act, oact, *pact;
9411             if (arg2) {
9412                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9413                     return -TARGET_EFAULT;
9414                 act._sa_handler = old_act->_sa_handler;
9415                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9416                 act.sa_flags = old_act->sa_flags;
9417 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9418                 act.sa_restorer = old_act->sa_restorer;
9419 #endif
9420                 unlock_user_struct(old_act, arg2, 0);
9421                 pact = &act;
9422             } else {
9423                 pact = NULL;
9424             }
9425             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9426             if (!is_error(ret) && arg3) {
9427                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9428                     return -TARGET_EFAULT;
9429                 old_act->_sa_handler = oact._sa_handler;
9430                 old_act->sa_mask = oact.sa_mask.sig[0];
9431                 old_act->sa_flags = oact.sa_flags;
9432 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9433                 old_act->sa_restorer = oact.sa_restorer;
9434 #endif
9435                 unlock_user_struct(old_act, arg3, 1);
9436             }
9437 #endif
9438         }
9439         return ret;
9440 #endif
9441     case TARGET_NR_rt_sigaction:
9442         {
9443             /*
9444              * For Alpha and SPARC this is a 5 argument syscall, with
9445              * a 'restorer' parameter which must be copied into the
9446              * sa_restorer field of the sigaction struct.
9447              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9448              * and arg5 is the sigsetsize.
9449              */
9450 #if defined(TARGET_ALPHA)
9451             target_ulong sigsetsize = arg4;
9452             target_ulong restorer = arg5;
9453 #elif defined(TARGET_SPARC)
9454             target_ulong restorer = arg4;
9455             target_ulong sigsetsize = arg5;
9456 #else
9457             target_ulong sigsetsize = arg4;
9458             target_ulong restorer = 0;
9459 #endif
9460             struct target_sigaction *act = NULL;
9461             struct target_sigaction *oact = NULL;
9462 
9463             if (sigsetsize != sizeof(target_sigset_t)) {
9464                 return -TARGET_EINVAL;
9465             }
9466             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9467                 return -TARGET_EFAULT;
9468             }
9469             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9470                 ret = -TARGET_EFAULT;
9471             } else {
9472                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9473                 if (oact) {
9474                     unlock_user_struct(oact, arg3, 1);
9475                 }
9476             }
9477             if (act) {
9478                 unlock_user_struct(act, arg2, 0);
9479             }
9480         }
9481         return ret;
9482 #ifdef TARGET_NR_sgetmask /* not on alpha */
9483     case TARGET_NR_sgetmask:
9484         {
9485             sigset_t cur_set;
9486             abi_ulong target_set;
9487             ret = do_sigprocmask(0, NULL, &cur_set);
9488             if (!ret) {
9489                 host_to_target_old_sigset(&target_set, &cur_set);
9490                 ret = target_set;
9491             }
9492         }
9493         return ret;
9494 #endif
9495 #ifdef TARGET_NR_ssetmask /* not on alpha */
9496     case TARGET_NR_ssetmask:
9497         {
9498             sigset_t set, oset;
9499             abi_ulong target_set = arg1;
9500             target_to_host_old_sigset(&set, &target_set);
9501             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9502             if (!ret) {
9503                 host_to_target_old_sigset(&target_set, &oset);
9504                 ret = target_set;
9505             }
9506         }
9507         return ret;
9508 #endif
9509 #ifdef TARGET_NR_sigprocmask
9510     case TARGET_NR_sigprocmask:
9511         {
9512 #if defined(TARGET_ALPHA)
9513             sigset_t set, oldset;
9514             abi_ulong mask;
9515             int how;
9516 
9517             switch (arg1) {
9518             case TARGET_SIG_BLOCK:
9519                 how = SIG_BLOCK;
9520                 break;
9521             case TARGET_SIG_UNBLOCK:
9522                 how = SIG_UNBLOCK;
9523                 break;
9524             case TARGET_SIG_SETMASK:
9525                 how = SIG_SETMASK;
9526                 break;
9527             default:
9528                 return -TARGET_EINVAL;
9529             }
9530             mask = arg2;
9531             target_to_host_old_sigset(&set, &mask);
9532 
9533             ret = do_sigprocmask(how, &set, &oldset);
9534             if (!is_error(ret)) {
9535                 host_to_target_old_sigset(&mask, &oldset);
9536                 ret = mask;
9537                 cpu_env->ir[IR_V0] = 0; /* force no error */
9538             }
9539 #else
9540             sigset_t set, oldset, *set_ptr;
9541             int how;
9542 
9543             if (arg2) {
9544                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9545                 if (!p) {
9546                     return -TARGET_EFAULT;
9547                 }
9548                 target_to_host_old_sigset(&set, p);
9549                 unlock_user(p, arg2, 0);
9550                 set_ptr = &set;
9551                 switch (arg1) {
9552                 case TARGET_SIG_BLOCK:
9553                     how = SIG_BLOCK;
9554                     break;
9555                 case TARGET_SIG_UNBLOCK:
9556                     how = SIG_UNBLOCK;
9557                     break;
9558                 case TARGET_SIG_SETMASK:
9559                     how = SIG_SETMASK;
9560                     break;
9561                 default:
9562                     return -TARGET_EINVAL;
9563                 }
9564             } else {
9565                 how = 0;
9566                 set_ptr = NULL;
9567             }
9568             ret = do_sigprocmask(how, set_ptr, &oldset);
9569             if (!is_error(ret) && arg3) {
9570                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9571                     return -TARGET_EFAULT;
9572                 host_to_target_old_sigset(p, &oldset);
9573                 unlock_user(p, arg3, sizeof(target_sigset_t));
9574             }
9575 #endif
9576         }
9577         return ret;
9578 #endif
9579     case TARGET_NR_rt_sigprocmask:
9580         {
9581             int how = arg1;
9582             sigset_t set, oldset, *set_ptr;
9583 
9584             if (arg4 != sizeof(target_sigset_t)) {
9585                 return -TARGET_EINVAL;
9586             }
9587 
9588             if (arg2) {
9589                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9590                 if (!p) {
9591                     return -TARGET_EFAULT;
9592                 }
9593                 target_to_host_sigset(&set, p);
9594                 unlock_user(p, arg2, 0);
9595                 set_ptr = &set;
9596                 switch(how) {
9597                 case TARGET_SIG_BLOCK:
9598                     how = SIG_BLOCK;
9599                     break;
9600                 case TARGET_SIG_UNBLOCK:
9601                     how = SIG_UNBLOCK;
9602                     break;
9603                 case TARGET_SIG_SETMASK:
9604                     how = SIG_SETMASK;
9605                     break;
9606                 default:
9607                     return -TARGET_EINVAL;
9608                 }
9609             } else {
9610                 how = 0;
9611                 set_ptr = NULL;
9612             }
9613             ret = do_sigprocmask(how, set_ptr, &oldset);
9614             if (!is_error(ret) && arg3) {
9615                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9616                     return -TARGET_EFAULT;
9617                 host_to_target_sigset(p, &oldset);
9618                 unlock_user(p, arg3, sizeof(target_sigset_t));
9619             }
9620         }
9621         return ret;
9622 #ifdef TARGET_NR_sigpending
9623     case TARGET_NR_sigpending:
9624         {
9625             sigset_t set;
9626             ret = get_errno(sigpending(&set));
9627             if (!is_error(ret)) {
9628                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9629                     return -TARGET_EFAULT;
9630                 host_to_target_old_sigset(p, &set);
9631                 unlock_user(p, arg1, sizeof(target_sigset_t));
9632             }
9633         }
9634         return ret;
9635 #endif
9636     case TARGET_NR_rt_sigpending:
9637         {
9638             sigset_t set;
9639 
9640             /* Yes, this check is >, not != like most. We follow the kernel's
9641              * logic and it does it like this because it implements
9642              * NR_sigpending through the same code path, and in that case
9643              * the old_sigset_t is smaller in size.
9644              */
9645             if (arg2 > sizeof(target_sigset_t)) {
9646                 return -TARGET_EINVAL;
9647             }
9648 
9649             ret = get_errno(sigpending(&set));
9650             if (!is_error(ret)) {
9651                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9652                     return -TARGET_EFAULT;
9653                 host_to_target_sigset(p, &set);
9654                 unlock_user(p, arg1, sizeof(target_sigset_t));
9655             }
9656         }
9657         return ret;
9658 #ifdef TARGET_NR_sigsuspend
9659     case TARGET_NR_sigsuspend:
9660         {
9661             sigset_t *set;
9662 
9663 #if defined(TARGET_ALPHA)
9664             TaskState *ts = cpu->opaque;
9665             /* target_to_host_old_sigset will bswap back */
9666             abi_ulong mask = tswapal(arg1);
9667             set = &ts->sigsuspend_mask;
9668             target_to_host_old_sigset(set, &mask);
9669 #else
9670             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9671             if (ret != 0) {
9672                 return ret;
9673             }
9674 #endif
9675             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9676             finish_sigsuspend_mask(ret);
9677         }
9678         return ret;
9679 #endif
9680     case TARGET_NR_rt_sigsuspend:
9681         {
9682             sigset_t *set;
9683 
9684             ret = process_sigsuspend_mask(&set, arg1, arg2);
9685             if (ret != 0) {
9686                 return ret;
9687             }
9688             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9689             finish_sigsuspend_mask(ret);
9690         }
9691         return ret;
9692 #ifdef TARGET_NR_rt_sigtimedwait
9693     case TARGET_NR_rt_sigtimedwait:
9694         {
9695             sigset_t set;
9696             struct timespec uts, *puts;
9697             siginfo_t uinfo;
9698 
9699             if (arg4 != sizeof(target_sigset_t)) {
9700                 return -TARGET_EINVAL;
9701             }
9702 
9703             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9704                 return -TARGET_EFAULT;
9705             target_to_host_sigset(&set, p);
9706             unlock_user(p, arg1, 0);
9707             if (arg3) {
9708                 puts = &uts;
9709                 if (target_to_host_timespec(puts, arg3)) {
9710                     return -TARGET_EFAULT;
9711                 }
9712             } else {
9713                 puts = NULL;
9714             }
9715             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9716                                                  SIGSET_T_SIZE));
9717             if (!is_error(ret)) {
9718                 if (arg2) {
9719                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9720                                   0);
9721                     if (!p) {
9722                         return -TARGET_EFAULT;
9723                     }
9724                     host_to_target_siginfo(p, &uinfo);
9725                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9726                 }
9727                 ret = host_to_target_signal(ret);
9728             }
9729         }
9730         return ret;
9731 #endif
9732 #ifdef TARGET_NR_rt_sigtimedwait_time64
9733     case TARGET_NR_rt_sigtimedwait_time64:
9734         {
9735             sigset_t set;
9736             struct timespec uts, *puts;
9737             siginfo_t uinfo;
9738 
9739             if (arg4 != sizeof(target_sigset_t)) {
9740                 return -TARGET_EINVAL;
9741             }
9742 
9743             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9744             if (!p) {
9745                 return -TARGET_EFAULT;
9746             }
9747             target_to_host_sigset(&set, p);
9748             unlock_user(p, arg1, 0);
9749             if (arg3) {
9750                 puts = &uts;
9751                 if (target_to_host_timespec64(puts, arg3)) {
9752                     return -TARGET_EFAULT;
9753                 }
9754             } else {
9755                 puts = NULL;
9756             }
9757             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9758                                                  SIGSET_T_SIZE));
9759             if (!is_error(ret)) {
9760                 if (arg2) {
9761                     p = lock_user(VERIFY_WRITE, arg2,
9762                                   sizeof(target_siginfo_t), 0);
9763                     if (!p) {
9764                         return -TARGET_EFAULT;
9765                     }
9766                     host_to_target_siginfo(p, &uinfo);
9767                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9768                 }
9769                 ret = host_to_target_signal(ret);
9770             }
9771         }
9772         return ret;
9773 #endif
9774     case TARGET_NR_rt_sigqueueinfo:
9775         {
9776             siginfo_t uinfo;
9777 
9778             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9779             if (!p) {
9780                 return -TARGET_EFAULT;
9781             }
9782             target_to_host_siginfo(&uinfo, p);
9783             unlock_user(p, arg3, 0);
9784             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
9785         }
9786         return ret;
9787     case TARGET_NR_rt_tgsigqueueinfo:
9788         {
9789             siginfo_t uinfo;
9790 
9791             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9792             if (!p) {
9793                 return -TARGET_EFAULT;
9794             }
9795             target_to_host_siginfo(&uinfo, p);
9796             unlock_user(p, arg4, 0);
9797             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
9798         }
9799         return ret;
9800 #ifdef TARGET_NR_sigreturn
9801     case TARGET_NR_sigreturn:
9802         if (block_signals()) {
9803             return -QEMU_ERESTARTSYS;
9804         }
9805         return do_sigreturn(cpu_env);
9806 #endif
9807     case TARGET_NR_rt_sigreturn:
9808         if (block_signals()) {
9809             return -QEMU_ERESTARTSYS;
9810         }
9811         return do_rt_sigreturn(cpu_env);
9812     case TARGET_NR_sethostname:
9813         if (!(p = lock_user_string(arg1)))
9814             return -TARGET_EFAULT;
9815         ret = get_errno(sethostname(p, arg2));
9816         unlock_user(p, arg1, 0);
9817         return ret;
9818 #ifdef TARGET_NR_setrlimit
9819     case TARGET_NR_setrlimit:
9820         {
9821             int resource = target_to_host_resource(arg1);
9822             struct target_rlimit *target_rlim;
9823             struct rlimit rlim;
9824             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9825                 return -TARGET_EFAULT;
9826             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9827             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9828             unlock_user_struct(target_rlim, arg2, 0);
9829             /*
9830              * If we just passed through resource limit settings for memory then
9831              * they would also apply to QEMU's own allocations, and QEMU will
9832              * crash or hang or die if its allocations fail. Ideally we would
9833              * track the guest allocations in QEMU and apply the limits ourselves.
9834              * For now, just tell the guest the call succeeded but don't actually
9835              * limit anything.
9836              */
9837             if (resource != RLIMIT_AS &&
9838                 resource != RLIMIT_DATA &&
9839                 resource != RLIMIT_STACK) {
9840                 return get_errno(setrlimit(resource, &rlim));
9841             } else {
9842                 return 0;
9843             }
9844         }
9845 #endif
9846 #ifdef TARGET_NR_getrlimit
9847     case TARGET_NR_getrlimit:
9848         {
9849             int resource = target_to_host_resource(arg1);
9850             struct target_rlimit *target_rlim;
9851             struct rlimit rlim;
9852 
9853             ret = get_errno(getrlimit(resource, &rlim));
9854             if (!is_error(ret)) {
9855                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9856                     return -TARGET_EFAULT;
9857                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9858                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9859                 unlock_user_struct(target_rlim, arg2, 1);
9860             }
9861         }
9862         return ret;
9863 #endif
9864     case TARGET_NR_getrusage:
9865         {
9866             struct rusage rusage;
9867             ret = get_errno(getrusage(arg1, &rusage));
9868             if (!is_error(ret)) {
9869                 ret = host_to_target_rusage(arg2, &rusage);
9870             }
9871         }
9872         return ret;
9873 #if defined(TARGET_NR_gettimeofday)
9874     case TARGET_NR_gettimeofday:
9875         {
9876             struct timeval tv;
9877             struct timezone tz;
9878 
9879             ret = get_errno(gettimeofday(&tv, &tz));
9880             if (!is_error(ret)) {
9881                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9882                     return -TARGET_EFAULT;
9883                 }
9884                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9885                     return -TARGET_EFAULT;
9886                 }
9887             }
9888         }
9889         return ret;
9890 #endif
9891 #if defined(TARGET_NR_settimeofday)
9892     case TARGET_NR_settimeofday:
9893         {
9894             struct timeval tv, *ptv = NULL;
9895             struct timezone tz, *ptz = NULL;
9896 
9897             if (arg1) {
9898                 if (copy_from_user_timeval(&tv, arg1)) {
9899                     return -TARGET_EFAULT;
9900                 }
9901                 ptv = &tv;
9902             }
9903 
9904             if (arg2) {
9905                 if (copy_from_user_timezone(&tz, arg2)) {
9906                     return -TARGET_EFAULT;
9907                 }
9908                 ptz = &tz;
9909             }
9910 
9911             return get_errno(settimeofday(ptv, ptz));
9912         }
9913 #endif
9914 #if defined(TARGET_NR_select)
9915     case TARGET_NR_select:
9916 #if defined(TARGET_WANT_NI_OLD_SELECT)
9917         /* some architectures used to have old_select here
9918          * but now ENOSYS it.
9919          */
9920         ret = -TARGET_ENOSYS;
9921 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9922         ret = do_old_select(arg1);
9923 #else
9924         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9925 #endif
9926         return ret;
9927 #endif
9928 #ifdef TARGET_NR_pselect6
9929     case TARGET_NR_pselect6:
9930         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9931 #endif
9932 #ifdef TARGET_NR_pselect6_time64
9933     case TARGET_NR_pselect6_time64:
9934         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9935 #endif
9936 #ifdef TARGET_NR_symlink
9937     case TARGET_NR_symlink:
9938         {
9939             void *p2;
9940             p = lock_user_string(arg1);
9941             p2 = lock_user_string(arg2);
9942             if (!p || !p2)
9943                 ret = -TARGET_EFAULT;
9944             else
9945                 ret = get_errno(symlink(p, p2));
9946             unlock_user(p2, arg2, 0);
9947             unlock_user(p, arg1, 0);
9948         }
9949         return ret;
9950 #endif
9951 #if defined(TARGET_NR_symlinkat)
9952     case TARGET_NR_symlinkat:
9953         {
9954             void *p2;
9955             p  = lock_user_string(arg1);
9956             p2 = lock_user_string(arg3);
9957             if (!p || !p2)
9958                 ret = -TARGET_EFAULT;
9959             else
9960                 ret = get_errno(symlinkat(p, arg2, p2));
9961             unlock_user(p2, arg3, 0);
9962             unlock_user(p, arg1, 0);
9963         }
9964         return ret;
9965 #endif
9966 #ifdef TARGET_NR_readlink
9967     case TARGET_NR_readlink:
9968         {
9969             void *p2;
9970             p = lock_user_string(arg1);
9971             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9972             if (!p || !p2) {
9973                 ret = -TARGET_EFAULT;
9974             } else if (!arg3) {
9975                 /* Short circuit this for the magic exe check. */
9976                 ret = -TARGET_EINVAL;
9977             } else if (is_proc_myself((const char *)p, "exe")) {
9978                 char real[PATH_MAX], *temp;
9979                 temp = realpath(exec_path, real);
9980                 /* Return value is # of bytes that we wrote to the buffer. */
9981                 if (temp == NULL) {
9982                     ret = get_errno(-1);
9983                 } else {
9984                     /* Don't worry about sign mismatch as earlier mapping
9985                      * logic would have thrown a bad address error. */
9986                     ret = MIN(strlen(real), arg3);
9987                     /* We cannot NUL terminate the string. */
9988                     memcpy(p2, real, ret);
9989                 }
9990             } else {
9991                 ret = get_errno(readlink(path(p), p2, arg3));
9992             }
9993             unlock_user(p2, arg2, ret);
9994             unlock_user(p, arg1, 0);
9995         }
9996         return ret;
9997 #endif
9998 #if defined(TARGET_NR_readlinkat)
9999     case TARGET_NR_readlinkat:
10000         {
10001             void *p2;
10002             p  = lock_user_string(arg2);
10003             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10004             if (!p || !p2) {
10005                 ret = -TARGET_EFAULT;
10006             } else if (!arg4) {
10007                 /* Short circuit this for the magic exe check. */
10008                 ret = -TARGET_EINVAL;
10009             } else if (is_proc_myself((const char *)p, "exe")) {
10010                 char real[PATH_MAX], *temp;
10011                 temp = realpath(exec_path, real);
10012                 /* Return value is # of bytes that we wrote to the buffer. */
10013                 if (temp == NULL) {
10014                     ret = get_errno(-1);
10015                 } else {
10016                     /* Don't worry about sign mismatch as earlier mapping
10017                      * logic would have thrown a bad address error. */
10018                     ret = MIN(strlen(real), arg4);
10019                     /* We cannot NUL terminate the string. */
10020                     memcpy(p2, real, ret);
10021                 }
10022             } else {
10023                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10024             }
10025             unlock_user(p2, arg3, ret);
10026             unlock_user(p, arg2, 0);
10027         }
10028         return ret;
10029 #endif
10030 #ifdef TARGET_NR_swapon
10031     case TARGET_NR_swapon:
10032         if (!(p = lock_user_string(arg1)))
10033             return -TARGET_EFAULT;
10034         ret = get_errno(swapon(p, arg2));
10035         unlock_user(p, arg1, 0);
10036         return ret;
10037 #endif
10038     case TARGET_NR_reboot:
10039         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10040            /* arg4 must be ignored in all other cases */
10041            p = lock_user_string(arg4);
10042            if (!p) {
10043                return -TARGET_EFAULT;
10044            }
10045            ret = get_errno(reboot(arg1, arg2, arg3, p));
10046            unlock_user(p, arg4, 0);
10047         } else {
10048            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10049         }
10050         return ret;
10051 #ifdef TARGET_NR_mmap
10052     case TARGET_NR_mmap:
10053 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10054     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10055     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
10056     || defined(TARGET_S390X)
10057         {
10058             abi_ulong *v;
10059             abi_ulong v1, v2, v3, v4, v5, v6;
10060             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10061                 return -TARGET_EFAULT;
10062             v1 = tswapal(v[0]);
10063             v2 = tswapal(v[1]);
10064             v3 = tswapal(v[2]);
10065             v4 = tswapal(v[3]);
10066             v5 = tswapal(v[4]);
10067             v6 = tswapal(v[5]);
10068             unlock_user(v, arg1, 0);
10069             ret = get_errno(target_mmap(v1, v2, v3,
10070                                         target_to_host_bitmask(v4, mmap_flags_tbl),
10071                                         v5, v6));
10072         }
10073 #else
10074         /* mmap pointers are always untagged */
10075         ret = get_errno(target_mmap(arg1, arg2, arg3,
10076                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
10077                                     arg5,
10078                                     arg6));
10079 #endif
10080         return ret;
10081 #endif
10082 #ifdef TARGET_NR_mmap2
10083     case TARGET_NR_mmap2:
10084 #ifndef MMAP_SHIFT
10085 #define MMAP_SHIFT 12
10086 #endif
10087         ret = target_mmap(arg1, arg2, arg3,
10088                           target_to_host_bitmask(arg4, mmap_flags_tbl),
10089                           arg5, arg6 << MMAP_SHIFT);
10090         return get_errno(ret);
10091 #endif
10092     case TARGET_NR_munmap:
10093         arg1 = cpu_untagged_addr(cpu, arg1);
10094         return get_errno(target_munmap(arg1, arg2));
10095     case TARGET_NR_mprotect:
10096         arg1 = cpu_untagged_addr(cpu, arg1);
10097         {
10098             TaskState *ts = cpu->opaque;
10099             /* Special hack to detect libc making the stack executable.  */
10100             if ((arg3 & PROT_GROWSDOWN)
10101                 && arg1 >= ts->info->stack_limit
10102                 && arg1 <= ts->info->start_stack) {
10103                 arg3 &= ~PROT_GROWSDOWN;
10104                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10105                 arg1 = ts->info->stack_limit;
10106             }
10107         }
10108         return get_errno(target_mprotect(arg1, arg2, arg3));
10109 #ifdef TARGET_NR_mremap
10110     case TARGET_NR_mremap:
10111         arg1 = cpu_untagged_addr(cpu, arg1);
10112         /* mremap new_addr (arg5) is always untagged */
10113         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10114 #endif
10115         /* ??? msync/mlock/munlock are broken for softmmu.  */
10116 #ifdef TARGET_NR_msync
10117     case TARGET_NR_msync:
10118         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10119 #endif
10120 #ifdef TARGET_NR_mlock
10121     case TARGET_NR_mlock:
10122         return get_errno(mlock(g2h(cpu, arg1), arg2));
10123 #endif
10124 #ifdef TARGET_NR_munlock
10125     case TARGET_NR_munlock:
10126         return get_errno(munlock(g2h(cpu, arg1), arg2));
10127 #endif
10128 #ifdef TARGET_NR_mlockall
10129     case TARGET_NR_mlockall:
10130         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10131 #endif
10132 #ifdef TARGET_NR_munlockall
10133     case TARGET_NR_munlockall:
10134         return get_errno(munlockall());
10135 #endif
10136 #ifdef TARGET_NR_truncate
10137     case TARGET_NR_truncate:
10138         if (!(p = lock_user_string(arg1)))
10139             return -TARGET_EFAULT;
10140         ret = get_errno(truncate(p, arg2));
10141         unlock_user(p, arg1, 0);
10142         return ret;
10143 #endif
10144 #ifdef TARGET_NR_ftruncate
10145     case TARGET_NR_ftruncate:
10146         return get_errno(ftruncate(arg1, arg2));
10147 #endif
10148     case TARGET_NR_fchmod:
10149         return get_errno(fchmod(arg1, arg2));
10150 #if defined(TARGET_NR_fchmodat)
10151     case TARGET_NR_fchmodat:
10152         if (!(p = lock_user_string(arg2)))
10153             return -TARGET_EFAULT;
10154         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10155         unlock_user(p, arg2, 0);
10156         return ret;
10157 #endif
10158     case TARGET_NR_getpriority:
10159         /* Note that negative values are valid for getpriority, so we must
10160            differentiate based on errno settings.  */
10161         errno = 0;
10162         ret = getpriority(arg1, arg2);
10163         if (ret == -1 && errno != 0) {
10164             return -host_to_target_errno(errno);
10165         }
10166 #ifdef TARGET_ALPHA
10167         /* Return value is the unbiased priority.  Signal no error.  */
10168         cpu_env->ir[IR_V0] = 0;
10169 #else
10170         /* Return value is a biased priority to avoid negative numbers.  */
10171         ret = 20 - ret;
10172 #endif
10173         return ret;
10174     case TARGET_NR_setpriority:
10175         return get_errno(setpriority(arg1, arg2, arg3));
10176 #ifdef TARGET_NR_statfs
10177     case TARGET_NR_statfs:
10178         if (!(p = lock_user_string(arg1))) {
10179             return -TARGET_EFAULT;
10180         }
10181         ret = get_errno(statfs(path(p), &stfs));
10182         unlock_user(p, arg1, 0);
10183     convert_statfs:
10184         if (!is_error(ret)) {
10185             struct target_statfs *target_stfs;
10186 
10187             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10188                 return -TARGET_EFAULT;
10189             __put_user(stfs.f_type, &target_stfs->f_type);
10190             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10191             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10192             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10193             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10194             __put_user(stfs.f_files, &target_stfs->f_files);
10195             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10196             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10197             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10198             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10199             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10200 #ifdef _STATFS_F_FLAGS
10201             __put_user(stfs.f_flags, &target_stfs->f_flags);
10202 #else
10203             __put_user(0, &target_stfs->f_flags);
10204 #endif
10205             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10206             unlock_user_struct(target_stfs, arg2, 1);
10207         }
10208         return ret;
10209 #endif
10210 #ifdef TARGET_NR_fstatfs
10211     case TARGET_NR_fstatfs:
10212         ret = get_errno(fstatfs(arg1, &stfs));
10213         goto convert_statfs;
10214 #endif
10215 #ifdef TARGET_NR_statfs64
10216     case TARGET_NR_statfs64:
10217         if (!(p = lock_user_string(arg1))) {
10218             return -TARGET_EFAULT;
10219         }
10220         ret = get_errno(statfs(path(p), &stfs));
10221         unlock_user(p, arg1, 0);
10222     convert_statfs64:
10223         if (!is_error(ret)) {
10224             struct target_statfs64 *target_stfs;
10225 
10226             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10227                 return -TARGET_EFAULT;
10228             __put_user(stfs.f_type, &target_stfs->f_type);
10229             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10230             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10231             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10232             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10233             __put_user(stfs.f_files, &target_stfs->f_files);
10234             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10235             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10236             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10237             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10238             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10239 #ifdef _STATFS_F_FLAGS
10240             __put_user(stfs.f_flags, &target_stfs->f_flags);
10241 #else
10242             __put_user(0, &target_stfs->f_flags);
10243 #endif
10244             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10245             unlock_user_struct(target_stfs, arg3, 1);
10246         }
10247         return ret;
10248     case TARGET_NR_fstatfs64:
10249         ret = get_errno(fstatfs(arg1, &stfs));
10250         goto convert_statfs64;
10251 #endif
10252 #ifdef TARGET_NR_socketcall
10253     case TARGET_NR_socketcall:
10254         return do_socketcall(arg1, arg2);
10255 #endif
10256 #ifdef TARGET_NR_accept
10257     case TARGET_NR_accept:
10258         return do_accept4(arg1, arg2, arg3, 0);
10259 #endif
10260 #ifdef TARGET_NR_accept4
10261     case TARGET_NR_accept4:
10262         return do_accept4(arg1, arg2, arg3, arg4);
10263 #endif
10264 #ifdef TARGET_NR_bind
10265     case TARGET_NR_bind:
10266         return do_bind(arg1, arg2, arg3);
10267 #endif
10268 #ifdef TARGET_NR_connect
10269     case TARGET_NR_connect:
10270         return do_connect(arg1, arg2, arg3);
10271 #endif
10272 #ifdef TARGET_NR_getpeername
10273     case TARGET_NR_getpeername:
10274         return do_getpeername(arg1, arg2, arg3);
10275 #endif
10276 #ifdef TARGET_NR_getsockname
10277     case TARGET_NR_getsockname:
10278         return do_getsockname(arg1, arg2, arg3);
10279 #endif
10280 #ifdef TARGET_NR_getsockopt
10281     case TARGET_NR_getsockopt:
10282         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10283 #endif
10284 #ifdef TARGET_NR_listen
10285     case TARGET_NR_listen:
10286         return get_errno(listen(arg1, arg2));
10287 #endif
10288 #ifdef TARGET_NR_recv
10289     case TARGET_NR_recv:
10290         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10291 #endif
10292 #ifdef TARGET_NR_recvfrom
10293     case TARGET_NR_recvfrom:
10294         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10295 #endif
10296 #ifdef TARGET_NR_recvmsg
10297     case TARGET_NR_recvmsg:
10298         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10299 #endif
10300 #ifdef TARGET_NR_send
10301     case TARGET_NR_send:
10302         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10303 #endif
10304 #ifdef TARGET_NR_sendmsg
10305     case TARGET_NR_sendmsg:
10306         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10307 #endif
10308 #ifdef TARGET_NR_sendmmsg
10309     case TARGET_NR_sendmmsg:
10310         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10311 #endif
10312 #ifdef TARGET_NR_recvmmsg
10313     case TARGET_NR_recvmmsg:
10314         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10315 #endif
10316 #ifdef TARGET_NR_sendto
10317     case TARGET_NR_sendto:
10318         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10319 #endif
10320 #ifdef TARGET_NR_shutdown
10321     case TARGET_NR_shutdown:
10322         return get_errno(shutdown(arg1, arg2));
10323 #endif
10324 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10325     case TARGET_NR_getrandom:
10326         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10327         if (!p) {
10328             return -TARGET_EFAULT;
10329         }
10330         ret = get_errno(getrandom(p, arg2, arg3));
10331         unlock_user(p, arg1, ret);
10332         return ret;
10333 #endif
10334 #ifdef TARGET_NR_socket
10335     case TARGET_NR_socket:
10336         return do_socket(arg1, arg2, arg3);
10337 #endif
10338 #ifdef TARGET_NR_socketpair
10339     case TARGET_NR_socketpair:
10340         return do_socketpair(arg1, arg2, arg3, arg4);
10341 #endif
10342 #ifdef TARGET_NR_setsockopt
10343     case TARGET_NR_setsockopt:
10344         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10345 #endif
10346 #if defined(TARGET_NR_syslog)
10347     case TARGET_NR_syslog:
10348         {
10349             int len = arg2;
10350 
10351             switch (arg1) {
10352             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10353             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10354             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10355             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10356             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10357             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10358             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10359             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10360                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10361             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10362             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10363             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10364                 {
10365                     if (len < 0) {
10366                         return -TARGET_EINVAL;
10367                     }
10368                     if (len == 0) {
10369                         return 0;
10370                     }
10371                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10372                     if (!p) {
10373                         return -TARGET_EFAULT;
10374                     }
10375                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10376                     unlock_user(p, arg2, arg3);
10377                 }
10378                 return ret;
10379             default:
10380                 return -TARGET_EINVAL;
10381             }
10382         }
10383         break;
10384 #endif
10385     case TARGET_NR_setitimer:
10386         {
10387             struct itimerval value, ovalue, *pvalue;
10388 
10389             if (arg2) {
10390                 pvalue = &value;
10391                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10392                     || copy_from_user_timeval(&pvalue->it_value,
10393                                               arg2 + sizeof(struct target_timeval)))
10394                     return -TARGET_EFAULT;
10395             } else {
10396                 pvalue = NULL;
10397             }
10398             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10399             if (!is_error(ret) && arg3) {
10400                 if (copy_to_user_timeval(arg3,
10401                                          &ovalue.it_interval)
10402                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10403                                             &ovalue.it_value))
10404                     return -TARGET_EFAULT;
10405             }
10406         }
10407         return ret;
10408     case TARGET_NR_getitimer:
10409         {
10410             struct itimerval value;
10411 
10412             ret = get_errno(getitimer(arg1, &value));
10413             if (!is_error(ret) && arg2) {
10414                 if (copy_to_user_timeval(arg2,
10415                                          &value.it_interval)
10416                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10417                                             &value.it_value))
10418                     return -TARGET_EFAULT;
10419             }
10420         }
10421         return ret;
10422 #ifdef TARGET_NR_stat
10423     case TARGET_NR_stat:
10424         if (!(p = lock_user_string(arg1))) {
10425             return -TARGET_EFAULT;
10426         }
10427         ret = get_errno(stat(path(p), &st));
10428         unlock_user(p, arg1, 0);
10429         goto do_stat;
10430 #endif
10431 #ifdef TARGET_NR_lstat
10432     case TARGET_NR_lstat:
10433         if (!(p = lock_user_string(arg1))) {
10434             return -TARGET_EFAULT;
10435         }
10436         ret = get_errno(lstat(path(p), &st));
10437         unlock_user(p, arg1, 0);
10438         goto do_stat;
10439 #endif
10440 #ifdef TARGET_NR_fstat
10441     case TARGET_NR_fstat:
10442         {
10443             ret = get_errno(fstat(arg1, &st));
10444 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10445         do_stat:
10446 #endif
10447             if (!is_error(ret)) {
10448                 struct target_stat *target_st;
10449 
10450                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10451                     return -TARGET_EFAULT;
10452                 memset(target_st, 0, sizeof(*target_st));
10453                 __put_user(st.st_dev, &target_st->st_dev);
10454                 __put_user(st.st_ino, &target_st->st_ino);
10455                 __put_user(st.st_mode, &target_st->st_mode);
10456                 __put_user(st.st_uid, &target_st->st_uid);
10457                 __put_user(st.st_gid, &target_st->st_gid);
10458                 __put_user(st.st_nlink, &target_st->st_nlink);
10459                 __put_user(st.st_rdev, &target_st->st_rdev);
10460                 __put_user(st.st_size, &target_st->st_size);
10461                 __put_user(st.st_blksize, &target_st->st_blksize);
10462                 __put_user(st.st_blocks, &target_st->st_blocks);
10463                 __put_user(st.st_atime, &target_st->target_st_atime);
10464                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10465                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10466 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10467                 __put_user(st.st_atim.tv_nsec,
10468                            &target_st->target_st_atime_nsec);
10469                 __put_user(st.st_mtim.tv_nsec,
10470                            &target_st->target_st_mtime_nsec);
10471                 __put_user(st.st_ctim.tv_nsec,
10472                            &target_st->target_st_ctime_nsec);
10473 #endif
10474                 unlock_user_struct(target_st, arg2, 1);
10475             }
10476         }
10477         return ret;
10478 #endif
10479     case TARGET_NR_vhangup:
10480         return get_errno(vhangup());
10481 #ifdef TARGET_NR_syscall
10482     case TARGET_NR_syscall:
10483         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10484                           arg6, arg7, arg8, 0);
10485 #endif
10486 #if defined(TARGET_NR_wait4)
10487     case TARGET_NR_wait4:
10488         {
10489             int status;
10490             abi_long status_ptr = arg2;
10491             struct rusage rusage, *rusage_ptr;
10492             abi_ulong target_rusage = arg4;
10493             abi_long rusage_err;
10494             if (target_rusage)
10495                 rusage_ptr = &rusage;
10496             else
10497                 rusage_ptr = NULL;
10498             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10499             if (!is_error(ret)) {
10500                 if (status_ptr && ret) {
10501                     status = host_to_target_waitstatus(status);
10502                     if (put_user_s32(status, status_ptr))
10503                         return -TARGET_EFAULT;
10504                 }
10505                 if (target_rusage) {
10506                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10507                     if (rusage_err) {
10508                         ret = rusage_err;
10509                     }
10510                 }
10511             }
10512         }
10513         return ret;
10514 #endif
10515 #ifdef TARGET_NR_swapoff
10516     case TARGET_NR_swapoff:
10517         if (!(p = lock_user_string(arg1)))
10518             return -TARGET_EFAULT;
10519         ret = get_errno(swapoff(p));
10520         unlock_user(p, arg1, 0);
10521         return ret;
10522 #endif
10523     case TARGET_NR_sysinfo:
10524         {
10525             struct target_sysinfo *target_value;
10526             struct sysinfo value;
10527             ret = get_errno(sysinfo(&value));
10528             if (!is_error(ret) && arg1)
10529             {
10530                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10531                     return -TARGET_EFAULT;
10532                 __put_user(value.uptime, &target_value->uptime);
10533                 __put_user(value.loads[0], &target_value->loads[0]);
10534                 __put_user(value.loads[1], &target_value->loads[1]);
10535                 __put_user(value.loads[2], &target_value->loads[2]);
10536                 __put_user(value.totalram, &target_value->totalram);
10537                 __put_user(value.freeram, &target_value->freeram);
10538                 __put_user(value.sharedram, &target_value->sharedram);
10539                 __put_user(value.bufferram, &target_value->bufferram);
10540                 __put_user(value.totalswap, &target_value->totalswap);
10541                 __put_user(value.freeswap, &target_value->freeswap);
10542                 __put_user(value.procs, &target_value->procs);
10543                 __put_user(value.totalhigh, &target_value->totalhigh);
10544                 __put_user(value.freehigh, &target_value->freehigh);
10545                 __put_user(value.mem_unit, &target_value->mem_unit);
10546                 unlock_user_struct(target_value, arg1, 1);
10547             }
10548         }
10549         return ret;
10550 #ifdef TARGET_NR_ipc
10551     case TARGET_NR_ipc:
10552         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10553 #endif
10554 #ifdef TARGET_NR_semget
10555     case TARGET_NR_semget:
10556         return get_errno(semget(arg1, arg2, arg3));
10557 #endif
10558 #ifdef TARGET_NR_semop
10559     case TARGET_NR_semop:
10560         return do_semtimedop(arg1, arg2, arg3, 0, false);
10561 #endif
10562 #ifdef TARGET_NR_semtimedop
10563     case TARGET_NR_semtimedop:
10564         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10565 #endif
10566 #ifdef TARGET_NR_semtimedop_time64
10567     case TARGET_NR_semtimedop_time64:
10568         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10569 #endif
10570 #ifdef TARGET_NR_semctl
10571     case TARGET_NR_semctl:
10572         return do_semctl(arg1, arg2, arg3, arg4);
10573 #endif
10574 #ifdef TARGET_NR_msgctl
10575     case TARGET_NR_msgctl:
10576         return do_msgctl(arg1, arg2, arg3);
10577 #endif
10578 #ifdef TARGET_NR_msgget
10579     case TARGET_NR_msgget:
10580         return get_errno(msgget(arg1, arg2));
10581 #endif
10582 #ifdef TARGET_NR_msgrcv
10583     case TARGET_NR_msgrcv:
10584         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10585 #endif
10586 #ifdef TARGET_NR_msgsnd
10587     case TARGET_NR_msgsnd:
10588         return do_msgsnd(arg1, arg2, arg3, arg4);
10589 #endif
10590 #ifdef TARGET_NR_shmget
10591     case TARGET_NR_shmget:
10592         return get_errno(shmget(arg1, arg2, arg3));
10593 #endif
10594 #ifdef TARGET_NR_shmctl
10595     case TARGET_NR_shmctl:
10596         return do_shmctl(arg1, arg2, arg3);
10597 #endif
10598 #ifdef TARGET_NR_shmat
10599     case TARGET_NR_shmat:
10600         return do_shmat(cpu_env, arg1, arg2, arg3);
10601 #endif
10602 #ifdef TARGET_NR_shmdt
10603     case TARGET_NR_shmdt:
10604         return do_shmdt(arg1);
10605 #endif
10606     case TARGET_NR_fsync:
10607         return get_errno(fsync(arg1));
10608     case TARGET_NR_clone:
10609         /* Linux manages to have three different orderings for its
10610          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10611          * match the kernel's CONFIG_CLONE_* settings.
10612          * Microblaze is further special in that it uses a sixth
10613          * implicit argument to clone for the TLS pointer.
10614          */
10615 #if defined(TARGET_MICROBLAZE)
10616         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10617 #elif defined(TARGET_CLONE_BACKWARDS)
10618         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10619 #elif defined(TARGET_CLONE_BACKWARDS2)
10620         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10621 #else
10622         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10623 #endif
10624         return ret;
10625 #ifdef __NR_exit_group
10626         /* new thread calls */
10627     case TARGET_NR_exit_group:
10628         preexit_cleanup(cpu_env, arg1);
10629         return get_errno(exit_group(arg1));
10630 #endif
10631     case TARGET_NR_setdomainname:
10632         if (!(p = lock_user_string(arg1)))
10633             return -TARGET_EFAULT;
10634         ret = get_errno(setdomainname(p, arg2));
10635         unlock_user(p, arg1, 0);
10636         return ret;
10637     case TARGET_NR_uname:
10638         /* no need to transcode because we use the linux syscall */
10639         {
10640             struct new_utsname * buf;
10641 
10642             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10643                 return -TARGET_EFAULT;
10644             ret = get_errno(sys_uname(buf));
10645             if (!is_error(ret)) {
10646                 /* Overwrite the native machine name with whatever is being
10647                    emulated. */
10648                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10649                           sizeof(buf->machine));
10650                 /* Allow the user to override the reported release.  */
10651                 if (qemu_uname_release && *qemu_uname_release) {
10652                     g_strlcpy(buf->release, qemu_uname_release,
10653                               sizeof(buf->release));
10654                 }
10655             }
10656             unlock_user_struct(buf, arg1, 1);
10657         }
10658         return ret;
10659 #ifdef TARGET_I386
10660     case TARGET_NR_modify_ldt:
10661         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10662 #if !defined(TARGET_X86_64)
10663     case TARGET_NR_vm86:
10664         return do_vm86(cpu_env, arg1, arg2);
10665 #endif
10666 #endif
10667 #if defined(TARGET_NR_adjtimex)
10668     case TARGET_NR_adjtimex:
10669         {
10670             struct timex host_buf;
10671 
10672             if (target_to_host_timex(&host_buf, arg1) != 0) {
10673                 return -TARGET_EFAULT;
10674             }
10675             ret = get_errno(adjtimex(&host_buf));
10676             if (!is_error(ret)) {
10677                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10678                     return -TARGET_EFAULT;
10679                 }
10680             }
10681         }
10682         return ret;
10683 #endif
10684 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10685     case TARGET_NR_clock_adjtime:
10686         {
10687             struct timex htx, *phtx = &htx;
10688 
10689             if (target_to_host_timex(phtx, arg2) != 0) {
10690                 return -TARGET_EFAULT;
10691             }
10692             ret = get_errno(clock_adjtime(arg1, phtx));
10693             if (!is_error(ret) && phtx) {
10694                 if (host_to_target_timex(arg2, phtx) != 0) {
10695                     return -TARGET_EFAULT;
10696                 }
10697             }
10698         }
10699         return ret;
10700 #endif
10701 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10702     case TARGET_NR_clock_adjtime64:
10703         {
10704             struct timex htx;
10705 
10706             if (target_to_host_timex64(&htx, arg2) != 0) {
10707                 return -TARGET_EFAULT;
10708             }
10709             ret = get_errno(clock_adjtime(arg1, &htx));
10710             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10711                     return -TARGET_EFAULT;
10712             }
10713         }
10714         return ret;
10715 #endif
10716     case TARGET_NR_getpgid:
10717         return get_errno(getpgid(arg1));
10718     case TARGET_NR_fchdir:
10719         return get_errno(fchdir(arg1));
10720     case TARGET_NR_personality:
10721         return get_errno(personality(arg1));
10722 #ifdef TARGET_NR__llseek /* Not on alpha */
10723     case TARGET_NR__llseek:
10724         {
10725             int64_t res;
10726 #if !defined(__NR_llseek)
10727             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10728             if (res == -1) {
10729                 ret = get_errno(res);
10730             } else {
10731                 ret = 0;
10732             }
10733 #else
10734             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10735 #endif
10736             if ((ret == 0) && put_user_s64(res, arg4)) {
10737                 return -TARGET_EFAULT;
10738             }
10739         }
10740         return ret;
10741 #endif
10742 #ifdef TARGET_NR_getdents
10743     case TARGET_NR_getdents:
10744         return do_getdents(arg1, arg2, arg3);
10745 #endif /* TARGET_NR_getdents */
10746 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10747     case TARGET_NR_getdents64:
10748         return do_getdents64(arg1, arg2, arg3);
10749 #endif /* TARGET_NR_getdents64 */
10750 #if defined(TARGET_NR__newselect)
10751     case TARGET_NR__newselect:
10752         return do_select(arg1, arg2, arg3, arg4, arg5);
10753 #endif
10754 #ifdef TARGET_NR_poll
10755     case TARGET_NR_poll:
10756         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10757 #endif
10758 #ifdef TARGET_NR_ppoll
10759     case TARGET_NR_ppoll:
10760         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10761 #endif
10762 #ifdef TARGET_NR_ppoll_time64
10763     case TARGET_NR_ppoll_time64:
10764         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10765 #endif
10766     case TARGET_NR_flock:
10767         /* NOTE: the flock constant seems to be the same for every
10768            Linux platform */
10769         return get_errno(safe_flock(arg1, arg2));
10770     case TARGET_NR_readv:
10771         {
10772             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10773             if (vec != NULL) {
10774                 ret = get_errno(safe_readv(arg1, vec, arg3));
10775                 unlock_iovec(vec, arg2, arg3, 1);
10776             } else {
10777                 ret = -host_to_target_errno(errno);
10778             }
10779         }
10780         return ret;
10781     case TARGET_NR_writev:
10782         {
10783             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10784             if (vec != NULL) {
10785                 ret = get_errno(safe_writev(arg1, vec, arg3));
10786                 unlock_iovec(vec, arg2, arg3, 0);
10787             } else {
10788                 ret = -host_to_target_errno(errno);
10789             }
10790         }
10791         return ret;
10792 #if defined(TARGET_NR_preadv)
10793     case TARGET_NR_preadv:
10794         {
10795             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10796             if (vec != NULL) {
10797                 unsigned long low, high;
10798 
10799                 target_to_host_low_high(arg4, arg5, &low, &high);
10800                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10801                 unlock_iovec(vec, arg2, arg3, 1);
10802             } else {
10803                 ret = -host_to_target_errno(errno);
10804            }
10805         }
10806         return ret;
10807 #endif
10808 #if defined(TARGET_NR_pwritev)
10809     case TARGET_NR_pwritev:
10810         {
10811             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10812             if (vec != NULL) {
10813                 unsigned long low, high;
10814 
10815                 target_to_host_low_high(arg4, arg5, &low, &high);
10816                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10817                 unlock_iovec(vec, arg2, arg3, 0);
10818             } else {
10819                 ret = -host_to_target_errno(errno);
10820            }
10821         }
10822         return ret;
10823 #endif
10824     case TARGET_NR_getsid:
10825         return get_errno(getsid(arg1));
10826 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10827     case TARGET_NR_fdatasync:
10828         return get_errno(fdatasync(arg1));
10829 #endif
10830     case TARGET_NR_sched_getaffinity:
10831         {
10832             unsigned int mask_size;
10833             unsigned long *mask;
10834 
10835             /*
10836              * sched_getaffinity needs multiples of ulong, so need to take
10837              * care of mismatches between target ulong and host ulong sizes.
10838              */
10839             if (arg2 & (sizeof(abi_ulong) - 1)) {
10840                 return -TARGET_EINVAL;
10841             }
10842             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10843 
10844             mask = alloca(mask_size);
10845             memset(mask, 0, mask_size);
10846             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10847 
10848             if (!is_error(ret)) {
10849                 if (ret > arg2) {
10850                     /* More data returned than the caller's buffer will fit.
10851                      * This only happens if sizeof(abi_long) < sizeof(long)
10852                      * and the caller passed us a buffer holding an odd number
10853                      * of abi_longs. If the host kernel is actually using the
10854                      * extra 4 bytes then fail EINVAL; otherwise we can just
10855                      * ignore them and only copy the interesting part.
10856                      */
10857                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10858                     if (numcpus > arg2 * 8) {
10859                         return -TARGET_EINVAL;
10860                     }
10861                     ret = arg2;
10862                 }
10863 
10864                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10865                     return -TARGET_EFAULT;
10866                 }
10867             }
10868         }
10869         return ret;
10870     case TARGET_NR_sched_setaffinity:
10871         {
10872             unsigned int mask_size;
10873             unsigned long *mask;
10874 
10875             /*
10876              * sched_setaffinity needs multiples of ulong, so need to take
10877              * care of mismatches between target ulong and host ulong sizes.
10878              */
10879             if (arg2 & (sizeof(abi_ulong) - 1)) {
10880                 return -TARGET_EINVAL;
10881             }
10882             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10883             mask = alloca(mask_size);
10884 
10885             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10886             if (ret) {
10887                 return ret;
10888             }
10889 
10890             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10891         }
10892     case TARGET_NR_getcpu:
10893         {
10894             unsigned cpu, node;
10895             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10896                                        arg2 ? &node : NULL,
10897                                        NULL));
10898             if (is_error(ret)) {
10899                 return ret;
10900             }
10901             if (arg1 && put_user_u32(cpu, arg1)) {
10902                 return -TARGET_EFAULT;
10903             }
10904             if (arg2 && put_user_u32(node, arg2)) {
10905                 return -TARGET_EFAULT;
10906             }
10907         }
10908         return ret;
10909     case TARGET_NR_sched_setparam:
10910         {
10911             struct target_sched_param *target_schp;
10912             struct sched_param schp;
10913 
10914             if (arg2 == 0) {
10915                 return -TARGET_EINVAL;
10916             }
10917             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10918                 return -TARGET_EFAULT;
10919             }
10920             schp.sched_priority = tswap32(target_schp->sched_priority);
10921             unlock_user_struct(target_schp, arg2, 0);
10922             return get_errno(sys_sched_setparam(arg1, &schp));
10923         }
10924     case TARGET_NR_sched_getparam:
10925         {
10926             struct target_sched_param *target_schp;
10927             struct sched_param schp;
10928 
10929             if (arg2 == 0) {
10930                 return -TARGET_EINVAL;
10931             }
10932             ret = get_errno(sys_sched_getparam(arg1, &schp));
10933             if (!is_error(ret)) {
10934                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10935                     return -TARGET_EFAULT;
10936                 }
10937                 target_schp->sched_priority = tswap32(schp.sched_priority);
10938                 unlock_user_struct(target_schp, arg2, 1);
10939             }
10940         }
10941         return ret;
10942     case TARGET_NR_sched_setscheduler:
10943         {
10944             struct target_sched_param *target_schp;
10945             struct sched_param schp;
10946             if (arg3 == 0) {
10947                 return -TARGET_EINVAL;
10948             }
10949             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10950                 return -TARGET_EFAULT;
10951             }
10952             schp.sched_priority = tswap32(target_schp->sched_priority);
10953             unlock_user_struct(target_schp, arg3, 0);
10954             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10955         }
10956     case TARGET_NR_sched_getscheduler:
10957         return get_errno(sys_sched_getscheduler(arg1));
10958     case TARGET_NR_sched_getattr:
10959         {
10960             struct target_sched_attr *target_scha;
10961             struct sched_attr scha;
10962             if (arg2 == 0) {
10963                 return -TARGET_EINVAL;
10964             }
10965             if (arg3 > sizeof(scha)) {
10966                 arg3 = sizeof(scha);
10967             }
10968             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10969             if (!is_error(ret)) {
10970                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10971                 if (!target_scha) {
10972                     return -TARGET_EFAULT;
10973                 }
10974                 target_scha->size = tswap32(scha.size);
10975                 target_scha->sched_policy = tswap32(scha.sched_policy);
10976                 target_scha->sched_flags = tswap64(scha.sched_flags);
10977                 target_scha->sched_nice = tswap32(scha.sched_nice);
10978                 target_scha->sched_priority = tswap32(scha.sched_priority);
10979                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10980                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10981                 target_scha->sched_period = tswap64(scha.sched_period);
10982                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10983                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10984                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10985                 }
10986                 unlock_user(target_scha, arg2, arg3);
10987             }
10988             return ret;
10989         }
10990     case TARGET_NR_sched_setattr:
10991         {
10992             struct target_sched_attr *target_scha;
10993             struct sched_attr scha;
10994             uint32_t size;
10995             int zeroed;
10996             if (arg2 == 0) {
10997                 return -TARGET_EINVAL;
10998             }
10999             if (get_user_u32(size, arg2)) {
11000                 return -TARGET_EFAULT;
11001             }
11002             if (!size) {
11003                 size = offsetof(struct target_sched_attr, sched_util_min);
11004             }
11005             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11006                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11007                     return -TARGET_EFAULT;
11008                 }
11009                 return -TARGET_E2BIG;
11010             }
11011 
11012             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11013             if (zeroed < 0) {
11014                 return zeroed;
11015             } else if (zeroed == 0) {
11016                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11017                     return -TARGET_EFAULT;
11018                 }
11019                 return -TARGET_E2BIG;
11020             }
11021             if (size > sizeof(struct target_sched_attr)) {
11022                 size = sizeof(struct target_sched_attr);
11023             }
11024 
11025             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11026             if (!target_scha) {
11027                 return -TARGET_EFAULT;
11028             }
11029             scha.size = size;
11030             scha.sched_policy = tswap32(target_scha->sched_policy);
11031             scha.sched_flags = tswap64(target_scha->sched_flags);
11032             scha.sched_nice = tswap32(target_scha->sched_nice);
11033             scha.sched_priority = tswap32(target_scha->sched_priority);
11034             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11035             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11036             scha.sched_period = tswap64(target_scha->sched_period);
11037             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11038                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11039                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11040             }
11041             unlock_user(target_scha, arg2, 0);
11042             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11043         }
11044     case TARGET_NR_sched_yield:
11045         return get_errno(sched_yield());
11046     case TARGET_NR_sched_get_priority_max:
11047         return get_errno(sched_get_priority_max(arg1));
11048     case TARGET_NR_sched_get_priority_min:
11049         return get_errno(sched_get_priority_min(arg1));
11050 #ifdef TARGET_NR_sched_rr_get_interval
11051     case TARGET_NR_sched_rr_get_interval:
11052         {
11053             struct timespec ts;
11054             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11055             if (!is_error(ret)) {
11056                 ret = host_to_target_timespec(arg2, &ts);
11057             }
11058         }
11059         return ret;
11060 #endif
11061 #ifdef TARGET_NR_sched_rr_get_interval_time64
11062     case TARGET_NR_sched_rr_get_interval_time64:
11063         {
11064             struct timespec ts;
11065             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11066             if (!is_error(ret)) {
11067                 ret = host_to_target_timespec64(arg2, &ts);
11068             }
11069         }
11070         return ret;
11071 #endif
11072 #if defined(TARGET_NR_nanosleep)
11073     case TARGET_NR_nanosleep:
11074         {
11075             struct timespec req, rem;
11076             target_to_host_timespec(&req, arg1);
11077             ret = get_errno(safe_nanosleep(&req, &rem));
11078             if (is_error(ret) && arg2) {
11079                 host_to_target_timespec(arg2, &rem);
11080             }
11081         }
11082         return ret;
11083 #endif
11084     case TARGET_NR_prctl:
11085         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11086         break;
11087 #ifdef TARGET_NR_arch_prctl
11088     case TARGET_NR_arch_prctl:
11089         return do_arch_prctl(cpu_env, arg1, arg2);
11090 #endif
11091 #ifdef TARGET_NR_pread64
11092     case TARGET_NR_pread64:
11093         if (regpairs_aligned(cpu_env, num)) {
11094             arg4 = arg5;
11095             arg5 = arg6;
11096         }
11097         if (arg2 == 0 && arg3 == 0) {
11098             /* Special-case NULL buffer and zero length, which should succeed */
11099             p = 0;
11100         } else {
11101             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11102             if (!p) {
11103                 return -TARGET_EFAULT;
11104             }
11105         }
11106         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11107         unlock_user(p, arg2, ret);
11108         return ret;
11109     case TARGET_NR_pwrite64:
11110         if (regpairs_aligned(cpu_env, num)) {
11111             arg4 = arg5;
11112             arg5 = arg6;
11113         }
11114         if (arg2 == 0 && arg3 == 0) {
11115             /* Special-case NULL buffer and zero length, which should succeed */
11116             p = 0;
11117         } else {
11118             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11119             if (!p) {
11120                 return -TARGET_EFAULT;
11121             }
11122         }
11123         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11124         unlock_user(p, arg2, 0);
11125         return ret;
11126 #endif
11127     case TARGET_NR_getcwd:
11128         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11129             return -TARGET_EFAULT;
11130         ret = get_errno(sys_getcwd1(p, arg2));
11131         unlock_user(p, arg1, ret);
11132         return ret;
11133     case TARGET_NR_capget:
11134     case TARGET_NR_capset:
11135     {
11136         struct target_user_cap_header *target_header;
11137         struct target_user_cap_data *target_data = NULL;
11138         struct __user_cap_header_struct header;
11139         struct __user_cap_data_struct data[2];
11140         struct __user_cap_data_struct *dataptr = NULL;
11141         int i, target_datalen;
11142         int data_items = 1;
11143 
11144         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11145             return -TARGET_EFAULT;
11146         }
11147         header.version = tswap32(target_header->version);
11148         header.pid = tswap32(target_header->pid);
11149 
11150         if (header.version != _LINUX_CAPABILITY_VERSION) {
11151             /* Version 2 and up takes pointer to two user_data structs */
11152             data_items = 2;
11153         }
11154 
11155         target_datalen = sizeof(*target_data) * data_items;
11156 
11157         if (arg2) {
11158             if (num == TARGET_NR_capget) {
11159                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11160             } else {
11161                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11162             }
11163             if (!target_data) {
11164                 unlock_user_struct(target_header, arg1, 0);
11165                 return -TARGET_EFAULT;
11166             }
11167 
11168             if (num == TARGET_NR_capset) {
11169                 for (i = 0; i < data_items; i++) {
11170                     data[i].effective = tswap32(target_data[i].effective);
11171                     data[i].permitted = tswap32(target_data[i].permitted);
11172                     data[i].inheritable = tswap32(target_data[i].inheritable);
11173                 }
11174             }
11175 
11176             dataptr = data;
11177         }
11178 
11179         if (num == TARGET_NR_capget) {
11180             ret = get_errno(capget(&header, dataptr));
11181         } else {
11182             ret = get_errno(capset(&header, dataptr));
11183         }
11184 
11185         /* The kernel always updates version for both capget and capset */
11186         target_header->version = tswap32(header.version);
11187         unlock_user_struct(target_header, arg1, 1);
11188 
11189         if (arg2) {
11190             if (num == TARGET_NR_capget) {
11191                 for (i = 0; i < data_items; i++) {
11192                     target_data[i].effective = tswap32(data[i].effective);
11193                     target_data[i].permitted = tswap32(data[i].permitted);
11194                     target_data[i].inheritable = tswap32(data[i].inheritable);
11195                 }
11196                 unlock_user(target_data, arg2, target_datalen);
11197             } else {
11198                 unlock_user(target_data, arg2, 0);
11199             }
11200         }
11201         return ret;
11202     }
11203     case TARGET_NR_sigaltstack:
11204         return do_sigaltstack(arg1, arg2, cpu_env);
11205 
11206 #ifdef CONFIG_SENDFILE
11207 #ifdef TARGET_NR_sendfile
11208     case TARGET_NR_sendfile:
11209     {
11210         off_t *offp = NULL;
11211         off_t off;
11212         if (arg3) {
11213             ret = get_user_sal(off, arg3);
11214             if (is_error(ret)) {
11215                 return ret;
11216             }
11217             offp = &off;
11218         }
11219         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11220         if (!is_error(ret) && arg3) {
11221             abi_long ret2 = put_user_sal(off, arg3);
11222             if (is_error(ret2)) {
11223                 ret = ret2;
11224             }
11225         }
11226         return ret;
11227     }
11228 #endif
11229 #ifdef TARGET_NR_sendfile64
11230     case TARGET_NR_sendfile64:
11231     {
11232         off_t *offp = NULL;
11233         off_t off;
11234         if (arg3) {
11235             ret = get_user_s64(off, arg3);
11236             if (is_error(ret)) {
11237                 return ret;
11238             }
11239             offp = &off;
11240         }
11241         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11242         if (!is_error(ret) && arg3) {
11243             abi_long ret2 = put_user_s64(off, arg3);
11244             if (is_error(ret2)) {
11245                 ret = ret2;
11246             }
11247         }
11248         return ret;
11249     }
11250 #endif
11251 #endif
11252 #ifdef TARGET_NR_vfork
11253     case TARGET_NR_vfork:
11254         return get_errno(do_fork(cpu_env,
11255                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11256                          0, 0, 0, 0));
11257 #endif
11258 #ifdef TARGET_NR_ugetrlimit
11259     case TARGET_NR_ugetrlimit:
11260     {
11261 	struct rlimit rlim;
11262 	int resource = target_to_host_resource(arg1);
11263 	ret = get_errno(getrlimit(resource, &rlim));
11264 	if (!is_error(ret)) {
11265 	    struct target_rlimit *target_rlim;
11266             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11267                 return -TARGET_EFAULT;
11268 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11269 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11270             unlock_user_struct(target_rlim, arg2, 1);
11271 	}
11272         return ret;
11273     }
11274 #endif
11275 #ifdef TARGET_NR_truncate64
11276     case TARGET_NR_truncate64:
11277         if (!(p = lock_user_string(arg1)))
11278             return -TARGET_EFAULT;
11279 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11280         unlock_user(p, arg1, 0);
11281         return ret;
11282 #endif
11283 #ifdef TARGET_NR_ftruncate64
11284     case TARGET_NR_ftruncate64:
11285         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11286 #endif
11287 #ifdef TARGET_NR_stat64
11288     case TARGET_NR_stat64:
11289         if (!(p = lock_user_string(arg1))) {
11290             return -TARGET_EFAULT;
11291         }
11292         ret = get_errno(stat(path(p), &st));
11293         unlock_user(p, arg1, 0);
11294         if (!is_error(ret))
11295             ret = host_to_target_stat64(cpu_env, arg2, &st);
11296         return ret;
11297 #endif
11298 #ifdef TARGET_NR_lstat64
11299     case TARGET_NR_lstat64:
11300         if (!(p = lock_user_string(arg1))) {
11301             return -TARGET_EFAULT;
11302         }
11303         ret = get_errno(lstat(path(p), &st));
11304         unlock_user(p, arg1, 0);
11305         if (!is_error(ret))
11306             ret = host_to_target_stat64(cpu_env, arg2, &st);
11307         return ret;
11308 #endif
11309 #ifdef TARGET_NR_fstat64
11310     case TARGET_NR_fstat64:
11311         ret = get_errno(fstat(arg1, &st));
11312         if (!is_error(ret))
11313             ret = host_to_target_stat64(cpu_env, arg2, &st);
11314         return ret;
11315 #endif
11316 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11317 #ifdef TARGET_NR_fstatat64
11318     case TARGET_NR_fstatat64:
11319 #endif
11320 #ifdef TARGET_NR_newfstatat
11321     case TARGET_NR_newfstatat:
11322 #endif
11323         if (!(p = lock_user_string(arg2))) {
11324             return -TARGET_EFAULT;
11325         }
11326         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11327         unlock_user(p, arg2, 0);
11328         if (!is_error(ret))
11329             ret = host_to_target_stat64(cpu_env, arg3, &st);
11330         return ret;
11331 #endif
11332 #if defined(TARGET_NR_statx)
11333     case TARGET_NR_statx:
11334         {
11335             struct target_statx *target_stx;
11336             int dirfd = arg1;
11337             int flags = arg3;
11338 
11339             p = lock_user_string(arg2);
11340             if (p == NULL) {
11341                 return -TARGET_EFAULT;
11342             }
11343 #if defined(__NR_statx)
11344             {
11345                 /*
11346                  * It is assumed that struct statx is architecture independent.
11347                  */
11348                 struct target_statx host_stx;
11349                 int mask = arg4;
11350 
11351                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11352                 if (!is_error(ret)) {
11353                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11354                         unlock_user(p, arg2, 0);
11355                         return -TARGET_EFAULT;
11356                     }
11357                 }
11358 
11359                 if (ret != -TARGET_ENOSYS) {
11360                     unlock_user(p, arg2, 0);
11361                     return ret;
11362                 }
11363             }
11364 #endif
11365             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11366             unlock_user(p, arg2, 0);
11367 
11368             if (!is_error(ret)) {
11369                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11370                     return -TARGET_EFAULT;
11371                 }
11372                 memset(target_stx, 0, sizeof(*target_stx));
11373                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11374                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11375                 __put_user(st.st_ino, &target_stx->stx_ino);
11376                 __put_user(st.st_mode, &target_stx->stx_mode);
11377                 __put_user(st.st_uid, &target_stx->stx_uid);
11378                 __put_user(st.st_gid, &target_stx->stx_gid);
11379                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11380                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11381                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11382                 __put_user(st.st_size, &target_stx->stx_size);
11383                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11384                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11385                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11386                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11387                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11388                 unlock_user_struct(target_stx, arg5, 1);
11389             }
11390         }
11391         return ret;
11392 #endif
11393 #ifdef TARGET_NR_lchown
11394     case TARGET_NR_lchown:
11395         if (!(p = lock_user_string(arg1)))
11396             return -TARGET_EFAULT;
11397         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11398         unlock_user(p, arg1, 0);
11399         return ret;
11400 #endif
11401 #ifdef TARGET_NR_getuid
11402     case TARGET_NR_getuid:
11403         return get_errno(high2lowuid(getuid()));
11404 #endif
11405 #ifdef TARGET_NR_getgid
11406     case TARGET_NR_getgid:
11407         return get_errno(high2lowgid(getgid()));
11408 #endif
11409 #ifdef TARGET_NR_geteuid
11410     case TARGET_NR_geteuid:
11411         return get_errno(high2lowuid(geteuid()));
11412 #endif
11413 #ifdef TARGET_NR_getegid
11414     case TARGET_NR_getegid:
11415         return get_errno(high2lowgid(getegid()));
11416 #endif
11417     case TARGET_NR_setreuid:
11418         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11419     case TARGET_NR_setregid:
11420         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11421     case TARGET_NR_getgroups:
11422         {
11423             int gidsetsize = arg1;
11424             target_id *target_grouplist;
11425             gid_t *grouplist;
11426             int i;
11427 
11428             grouplist = alloca(gidsetsize * sizeof(gid_t));
11429             ret = get_errno(getgroups(gidsetsize, grouplist));
11430             if (gidsetsize == 0)
11431                 return ret;
11432             if (!is_error(ret)) {
11433                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11434                 if (!target_grouplist)
11435                     return -TARGET_EFAULT;
11436                 for(i = 0;i < ret; i++)
11437                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11438                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11439             }
11440         }
11441         return ret;
11442     case TARGET_NR_setgroups:
11443         {
11444             int gidsetsize = arg1;
11445             target_id *target_grouplist;
11446             gid_t *grouplist = NULL;
11447             int i;
11448             if (gidsetsize) {
11449                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11450                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11451                 if (!target_grouplist) {
11452                     return -TARGET_EFAULT;
11453                 }
11454                 for (i = 0; i < gidsetsize; i++) {
11455                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11456                 }
11457                 unlock_user(target_grouplist, arg2, 0);
11458             }
11459             return get_errno(setgroups(gidsetsize, grouplist));
11460         }
11461     case TARGET_NR_fchown:
11462         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11463 #if defined(TARGET_NR_fchownat)
11464     case TARGET_NR_fchownat:
11465         if (!(p = lock_user_string(arg2)))
11466             return -TARGET_EFAULT;
11467         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11468                                  low2highgid(arg4), arg5));
11469         unlock_user(p, arg2, 0);
11470         return ret;
11471 #endif
11472 #ifdef TARGET_NR_setresuid
11473     case TARGET_NR_setresuid:
11474         return get_errno(sys_setresuid(low2highuid(arg1),
11475                                        low2highuid(arg2),
11476                                        low2highuid(arg3)));
11477 #endif
11478 #ifdef TARGET_NR_getresuid
11479     case TARGET_NR_getresuid:
11480         {
11481             uid_t ruid, euid, suid;
11482             ret = get_errno(getresuid(&ruid, &euid, &suid));
11483             if (!is_error(ret)) {
11484                 if (put_user_id(high2lowuid(ruid), arg1)
11485                     || put_user_id(high2lowuid(euid), arg2)
11486                     || put_user_id(high2lowuid(suid), arg3))
11487                     return -TARGET_EFAULT;
11488             }
11489         }
11490         return ret;
11491 #endif
11492 #ifdef TARGET_NR_getresgid
11493     case TARGET_NR_setresgid:
11494         return get_errno(sys_setresgid(low2highgid(arg1),
11495                                        low2highgid(arg2),
11496                                        low2highgid(arg3)));
11497 #endif
11498 #ifdef TARGET_NR_getresgid
11499     case TARGET_NR_getresgid:
11500         {
11501             gid_t rgid, egid, sgid;
11502             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11503             if (!is_error(ret)) {
11504                 if (put_user_id(high2lowgid(rgid), arg1)
11505                     || put_user_id(high2lowgid(egid), arg2)
11506                     || put_user_id(high2lowgid(sgid), arg3))
11507                     return -TARGET_EFAULT;
11508             }
11509         }
11510         return ret;
11511 #endif
11512 #ifdef TARGET_NR_chown
11513     case TARGET_NR_chown:
11514         if (!(p = lock_user_string(arg1)))
11515             return -TARGET_EFAULT;
11516         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11517         unlock_user(p, arg1, 0);
11518         return ret;
11519 #endif
11520     case TARGET_NR_setuid:
11521         return get_errno(sys_setuid(low2highuid(arg1)));
11522     case TARGET_NR_setgid:
11523         return get_errno(sys_setgid(low2highgid(arg1)));
11524     case TARGET_NR_setfsuid:
11525         return get_errno(setfsuid(arg1));
11526     case TARGET_NR_setfsgid:
11527         return get_errno(setfsgid(arg1));
11528 
11529 #ifdef TARGET_NR_lchown32
11530     case TARGET_NR_lchown32:
11531         if (!(p = lock_user_string(arg1)))
11532             return -TARGET_EFAULT;
11533         ret = get_errno(lchown(p, arg2, arg3));
11534         unlock_user(p, arg1, 0);
11535         return ret;
11536 #endif
11537 #ifdef TARGET_NR_getuid32
11538     case TARGET_NR_getuid32:
11539         return get_errno(getuid());
11540 #endif
11541 
11542 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11543    /* Alpha specific */
11544     case TARGET_NR_getxuid:
11545          {
11546             uid_t euid;
11547             euid=geteuid();
11548             cpu_env->ir[IR_A4]=euid;
11549          }
11550         return get_errno(getuid());
11551 #endif
11552 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11553    /* Alpha specific */
11554     case TARGET_NR_getxgid:
11555          {
11556             uid_t egid;
11557             egid=getegid();
11558             cpu_env->ir[IR_A4]=egid;
11559          }
11560         return get_errno(getgid());
11561 #endif
11562 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11563     /* Alpha specific */
11564     case TARGET_NR_osf_getsysinfo:
11565         ret = -TARGET_EOPNOTSUPP;
11566         switch (arg1) {
11567           case TARGET_GSI_IEEE_FP_CONTROL:
11568             {
11569                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11570                 uint64_t swcr = cpu_env->swcr;
11571 
11572                 swcr &= ~SWCR_STATUS_MASK;
11573                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11574 
11575                 if (put_user_u64 (swcr, arg2))
11576                         return -TARGET_EFAULT;
11577                 ret = 0;
11578             }
11579             break;
11580 
11581           /* case GSI_IEEE_STATE_AT_SIGNAL:
11582              -- Not implemented in linux kernel.
11583              case GSI_UACPROC:
11584              -- Retrieves current unaligned access state; not much used.
11585              case GSI_PROC_TYPE:
11586              -- Retrieves implver information; surely not used.
11587              case GSI_GET_HWRPB:
11588              -- Grabs a copy of the HWRPB; surely not used.
11589           */
11590         }
11591         return ret;
11592 #endif
11593 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11594     /* Alpha specific */
11595     case TARGET_NR_osf_setsysinfo:
11596         ret = -TARGET_EOPNOTSUPP;
11597         switch (arg1) {
11598           case TARGET_SSI_IEEE_FP_CONTROL:
11599             {
11600                 uint64_t swcr, fpcr;
11601 
11602                 if (get_user_u64 (swcr, arg2)) {
11603                     return -TARGET_EFAULT;
11604                 }
11605 
11606                 /*
11607                  * The kernel calls swcr_update_status to update the
11608                  * status bits from the fpcr at every point that it
11609                  * could be queried.  Therefore, we store the status
11610                  * bits only in FPCR.
11611                  */
11612                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11613 
11614                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11615                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11616                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11617                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11618                 ret = 0;
11619             }
11620             break;
11621 
11622           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11623             {
11624                 uint64_t exc, fpcr, fex;
11625 
11626                 if (get_user_u64(exc, arg2)) {
11627                     return -TARGET_EFAULT;
11628                 }
11629                 exc &= SWCR_STATUS_MASK;
11630                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11631 
11632                 /* Old exceptions are not signaled.  */
11633                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11634                 fex = exc & ~fex;
11635                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11636                 fex &= (cpu_env)->swcr;
11637 
11638                 /* Update the hardware fpcr.  */
11639                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11640                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11641 
11642                 if (fex) {
11643                     int si_code = TARGET_FPE_FLTUNK;
11644                     target_siginfo_t info;
11645 
11646                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11647                         si_code = TARGET_FPE_FLTUND;
11648                     }
11649                     if (fex & SWCR_TRAP_ENABLE_INE) {
11650                         si_code = TARGET_FPE_FLTRES;
11651                     }
11652                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11653                         si_code = TARGET_FPE_FLTUND;
11654                     }
11655                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11656                         si_code = TARGET_FPE_FLTOVF;
11657                     }
11658                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11659                         si_code = TARGET_FPE_FLTDIV;
11660                     }
11661                     if (fex & SWCR_TRAP_ENABLE_INV) {
11662                         si_code = TARGET_FPE_FLTINV;
11663                     }
11664 
11665                     info.si_signo = SIGFPE;
11666                     info.si_errno = 0;
11667                     info.si_code = si_code;
11668                     info._sifields._sigfault._addr = (cpu_env)->pc;
11669                     queue_signal(cpu_env, info.si_signo,
11670                                  QEMU_SI_FAULT, &info);
11671                 }
11672                 ret = 0;
11673             }
11674             break;
11675 
11676           /* case SSI_NVPAIRS:
11677              -- Used with SSIN_UACPROC to enable unaligned accesses.
11678              case SSI_IEEE_STATE_AT_SIGNAL:
11679              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11680              -- Not implemented in linux kernel
11681           */
11682         }
11683         return ret;
11684 #endif
11685 #ifdef TARGET_NR_osf_sigprocmask
11686     /* Alpha specific.  */
11687     case TARGET_NR_osf_sigprocmask:
11688         {
11689             abi_ulong mask;
11690             int how;
11691             sigset_t set, oldset;
11692 
11693             switch(arg1) {
11694             case TARGET_SIG_BLOCK:
11695                 how = SIG_BLOCK;
11696                 break;
11697             case TARGET_SIG_UNBLOCK:
11698                 how = SIG_UNBLOCK;
11699                 break;
11700             case TARGET_SIG_SETMASK:
11701                 how = SIG_SETMASK;
11702                 break;
11703             default:
11704                 return -TARGET_EINVAL;
11705             }
11706             mask = arg2;
11707             target_to_host_old_sigset(&set, &mask);
11708             ret = do_sigprocmask(how, &set, &oldset);
11709             if (!ret) {
11710                 host_to_target_old_sigset(&mask, &oldset);
11711                 ret = mask;
11712             }
11713         }
11714         return ret;
11715 #endif
11716 
11717 #ifdef TARGET_NR_getgid32
11718     case TARGET_NR_getgid32:
11719         return get_errno(getgid());
11720 #endif
11721 #ifdef TARGET_NR_geteuid32
11722     case TARGET_NR_geteuid32:
11723         return get_errno(geteuid());
11724 #endif
11725 #ifdef TARGET_NR_getegid32
11726     case TARGET_NR_getegid32:
11727         return get_errno(getegid());
11728 #endif
11729 #ifdef TARGET_NR_setreuid32
11730     case TARGET_NR_setreuid32:
11731         return get_errno(setreuid(arg1, arg2));
11732 #endif
11733 #ifdef TARGET_NR_setregid32
11734     case TARGET_NR_setregid32:
11735         return get_errno(setregid(arg1, arg2));
11736 #endif
11737 #ifdef TARGET_NR_getgroups32
11738     case TARGET_NR_getgroups32:
11739         {
11740             int gidsetsize = arg1;
11741             uint32_t *target_grouplist;
11742             gid_t *grouplist;
11743             int i;
11744 
11745             grouplist = alloca(gidsetsize * sizeof(gid_t));
11746             ret = get_errno(getgroups(gidsetsize, grouplist));
11747             if (gidsetsize == 0)
11748                 return ret;
11749             if (!is_error(ret)) {
11750                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11751                 if (!target_grouplist) {
11752                     return -TARGET_EFAULT;
11753                 }
11754                 for(i = 0;i < ret; i++)
11755                     target_grouplist[i] = tswap32(grouplist[i]);
11756                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11757             }
11758         }
11759         return ret;
11760 #endif
11761 #ifdef TARGET_NR_setgroups32
11762     case TARGET_NR_setgroups32:
11763         {
11764             int gidsetsize = arg1;
11765             uint32_t *target_grouplist;
11766             gid_t *grouplist;
11767             int i;
11768 
11769             grouplist = alloca(gidsetsize * sizeof(gid_t));
11770             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11771             if (!target_grouplist) {
11772                 return -TARGET_EFAULT;
11773             }
11774             for(i = 0;i < gidsetsize; i++)
11775                 grouplist[i] = tswap32(target_grouplist[i]);
11776             unlock_user(target_grouplist, arg2, 0);
11777             return get_errno(setgroups(gidsetsize, grouplist));
11778         }
11779 #endif
11780 #ifdef TARGET_NR_fchown32
11781     case TARGET_NR_fchown32:
11782         return get_errno(fchown(arg1, arg2, arg3));
11783 #endif
11784 #ifdef TARGET_NR_setresuid32
11785     case TARGET_NR_setresuid32:
11786         return get_errno(sys_setresuid(arg1, arg2, arg3));
11787 #endif
11788 #ifdef TARGET_NR_getresuid32
11789     case TARGET_NR_getresuid32:
11790         {
11791             uid_t ruid, euid, suid;
11792             ret = get_errno(getresuid(&ruid, &euid, &suid));
11793             if (!is_error(ret)) {
11794                 if (put_user_u32(ruid, arg1)
11795                     || put_user_u32(euid, arg2)
11796                     || put_user_u32(suid, arg3))
11797                     return -TARGET_EFAULT;
11798             }
11799         }
11800         return ret;
11801 #endif
11802 #ifdef TARGET_NR_setresgid32
11803     case TARGET_NR_setresgid32:
11804         return get_errno(sys_setresgid(arg1, arg2, arg3));
11805 #endif
11806 #ifdef TARGET_NR_getresgid32
11807     case TARGET_NR_getresgid32:
11808         {
11809             gid_t rgid, egid, sgid;
11810             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11811             if (!is_error(ret)) {
11812                 if (put_user_u32(rgid, arg1)
11813                     || put_user_u32(egid, arg2)
11814                     || put_user_u32(sgid, arg3))
11815                     return -TARGET_EFAULT;
11816             }
11817         }
11818         return ret;
11819 #endif
11820 #ifdef TARGET_NR_chown32
11821     case TARGET_NR_chown32:
11822         if (!(p = lock_user_string(arg1)))
11823             return -TARGET_EFAULT;
11824         ret = get_errno(chown(p, arg2, arg3));
11825         unlock_user(p, arg1, 0);
11826         return ret;
11827 #endif
11828 #ifdef TARGET_NR_setuid32
11829     case TARGET_NR_setuid32:
11830         return get_errno(sys_setuid(arg1));
11831 #endif
11832 #ifdef TARGET_NR_setgid32
11833     case TARGET_NR_setgid32:
11834         return get_errno(sys_setgid(arg1));
11835 #endif
11836 #ifdef TARGET_NR_setfsuid32
11837     case TARGET_NR_setfsuid32:
11838         return get_errno(setfsuid(arg1));
11839 #endif
11840 #ifdef TARGET_NR_setfsgid32
11841     case TARGET_NR_setfsgid32:
11842         return get_errno(setfsgid(arg1));
11843 #endif
11844 #ifdef TARGET_NR_mincore
11845     case TARGET_NR_mincore:
11846         {
11847             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11848             if (!a) {
11849                 return -TARGET_ENOMEM;
11850             }
11851             p = lock_user_string(arg3);
11852             if (!p) {
11853                 ret = -TARGET_EFAULT;
11854             } else {
11855                 ret = get_errno(mincore(a, arg2, p));
11856                 unlock_user(p, arg3, ret);
11857             }
11858             unlock_user(a, arg1, 0);
11859         }
11860         return ret;
11861 #endif
11862 #ifdef TARGET_NR_arm_fadvise64_64
11863     case TARGET_NR_arm_fadvise64_64:
11864         /* arm_fadvise64_64 looks like fadvise64_64 but
11865          * with different argument order: fd, advice, offset, len
11866          * rather than the usual fd, offset, len, advice.
11867          * Note that offset and len are both 64-bit so appear as
11868          * pairs of 32-bit registers.
11869          */
11870         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11871                             target_offset64(arg5, arg6), arg2);
11872         return -host_to_target_errno(ret);
11873 #endif
11874 
11875 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
11876 
11877 #ifdef TARGET_NR_fadvise64_64
11878     case TARGET_NR_fadvise64_64:
11879 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11880         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11881         ret = arg2;
11882         arg2 = arg3;
11883         arg3 = arg4;
11884         arg4 = arg5;
11885         arg5 = arg6;
11886         arg6 = ret;
11887 #else
11888         /* 6 args: fd, offset (high, low), len (high, low), advice */
11889         if (regpairs_aligned(cpu_env, num)) {
11890             /* offset is in (3,4), len in (5,6) and advice in 7 */
11891             arg2 = arg3;
11892             arg3 = arg4;
11893             arg4 = arg5;
11894             arg5 = arg6;
11895             arg6 = arg7;
11896         }
11897 #endif
11898         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11899                             target_offset64(arg4, arg5), arg6);
11900         return -host_to_target_errno(ret);
11901 #endif
11902 
11903 #ifdef TARGET_NR_fadvise64
11904     case TARGET_NR_fadvise64:
11905         /* 5 args: fd, offset (high, low), len, advice */
11906         if (regpairs_aligned(cpu_env, num)) {
11907             /* offset is in (3,4), len in 5 and advice in 6 */
11908             arg2 = arg3;
11909             arg3 = arg4;
11910             arg4 = arg5;
11911             arg5 = arg6;
11912         }
11913         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11914         return -host_to_target_errno(ret);
11915 #endif
11916 
11917 #else /* not a 32-bit ABI */
11918 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11919 #ifdef TARGET_NR_fadvise64_64
11920     case TARGET_NR_fadvise64_64:
11921 #endif
11922 #ifdef TARGET_NR_fadvise64
11923     case TARGET_NR_fadvise64:
11924 #endif
11925 #ifdef TARGET_S390X
11926         switch (arg4) {
11927         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11928         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11929         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11930         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11931         default: break;
11932         }
11933 #endif
11934         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11935 #endif
11936 #endif /* end of 64-bit ABI fadvise handling */
11937 
11938 #ifdef TARGET_NR_madvise
11939     case TARGET_NR_madvise:
11940         return target_madvise(arg1, arg2, arg3);
11941 #endif
11942 #ifdef TARGET_NR_fcntl64
11943     case TARGET_NR_fcntl64:
11944     {
11945         int cmd;
11946         struct flock64 fl;
11947         from_flock64_fn *copyfrom = copy_from_user_flock64;
11948         to_flock64_fn *copyto = copy_to_user_flock64;
11949 
11950 #ifdef TARGET_ARM
11951         if (!cpu_env->eabi) {
11952             copyfrom = copy_from_user_oabi_flock64;
11953             copyto = copy_to_user_oabi_flock64;
11954         }
11955 #endif
11956 
11957         cmd = target_to_host_fcntl_cmd(arg2);
11958         if (cmd == -TARGET_EINVAL) {
11959             return cmd;
11960         }
11961 
11962         switch(arg2) {
11963         case TARGET_F_GETLK64:
11964             ret = copyfrom(&fl, arg3);
11965             if (ret) {
11966                 break;
11967             }
11968             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11969             if (ret == 0) {
11970                 ret = copyto(arg3, &fl);
11971             }
11972 	    break;
11973 
11974         case TARGET_F_SETLK64:
11975         case TARGET_F_SETLKW64:
11976             ret = copyfrom(&fl, arg3);
11977             if (ret) {
11978                 break;
11979             }
11980             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11981 	    break;
11982         default:
11983             ret = do_fcntl(arg1, arg2, arg3);
11984             break;
11985         }
11986         return ret;
11987     }
11988 #endif
11989 #ifdef TARGET_NR_cacheflush
11990     case TARGET_NR_cacheflush:
11991         /* self-modifying code is handled automatically, so nothing needed */
11992         return 0;
11993 #endif
11994 #ifdef TARGET_NR_getpagesize
11995     case TARGET_NR_getpagesize:
11996         return TARGET_PAGE_SIZE;
11997 #endif
11998     case TARGET_NR_gettid:
11999         return get_errno(sys_gettid());
12000 #ifdef TARGET_NR_readahead
12001     case TARGET_NR_readahead:
12002 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12003         if (regpairs_aligned(cpu_env, num)) {
12004             arg2 = arg3;
12005             arg3 = arg4;
12006             arg4 = arg5;
12007         }
12008         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12009 #else
12010         ret = get_errno(readahead(arg1, arg2, arg3));
12011 #endif
12012         return ret;
12013 #endif
12014 #ifdef CONFIG_ATTR
12015 #ifdef TARGET_NR_setxattr
12016     case TARGET_NR_listxattr:
12017     case TARGET_NR_llistxattr:
12018     {
12019         void *p, *b = 0;
12020         if (arg2) {
12021             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12022             if (!b) {
12023                 return -TARGET_EFAULT;
12024             }
12025         }
12026         p = lock_user_string(arg1);
12027         if (p) {
12028             if (num == TARGET_NR_listxattr) {
12029                 ret = get_errno(listxattr(p, b, arg3));
12030             } else {
12031                 ret = get_errno(llistxattr(p, b, arg3));
12032             }
12033         } else {
12034             ret = -TARGET_EFAULT;
12035         }
12036         unlock_user(p, arg1, 0);
12037         unlock_user(b, arg2, arg3);
12038         return ret;
12039     }
12040     case TARGET_NR_flistxattr:
12041     {
12042         void *b = 0;
12043         if (arg2) {
12044             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12045             if (!b) {
12046                 return -TARGET_EFAULT;
12047             }
12048         }
12049         ret = get_errno(flistxattr(arg1, b, arg3));
12050         unlock_user(b, arg2, arg3);
12051         return ret;
12052     }
12053     case TARGET_NR_setxattr:
12054     case TARGET_NR_lsetxattr:
12055         {
12056             void *p, *n, *v = 0;
12057             if (arg3) {
12058                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12059                 if (!v) {
12060                     return -TARGET_EFAULT;
12061                 }
12062             }
12063             p = lock_user_string(arg1);
12064             n = lock_user_string(arg2);
12065             if (p && n) {
12066                 if (num == TARGET_NR_setxattr) {
12067                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12068                 } else {
12069                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12070                 }
12071             } else {
12072                 ret = -TARGET_EFAULT;
12073             }
12074             unlock_user(p, arg1, 0);
12075             unlock_user(n, arg2, 0);
12076             unlock_user(v, arg3, 0);
12077         }
12078         return ret;
12079     case TARGET_NR_fsetxattr:
12080         {
12081             void *n, *v = 0;
12082             if (arg3) {
12083                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12084                 if (!v) {
12085                     return -TARGET_EFAULT;
12086                 }
12087             }
12088             n = lock_user_string(arg2);
12089             if (n) {
12090                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12091             } else {
12092                 ret = -TARGET_EFAULT;
12093             }
12094             unlock_user(n, arg2, 0);
12095             unlock_user(v, arg3, 0);
12096         }
12097         return ret;
12098     case TARGET_NR_getxattr:
12099     case TARGET_NR_lgetxattr:
12100         {
12101             void *p, *n, *v = 0;
12102             if (arg3) {
12103                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12104                 if (!v) {
12105                     return -TARGET_EFAULT;
12106                 }
12107             }
12108             p = lock_user_string(arg1);
12109             n = lock_user_string(arg2);
12110             if (p && n) {
12111                 if (num == TARGET_NR_getxattr) {
12112                     ret = get_errno(getxattr(p, n, v, arg4));
12113                 } else {
12114                     ret = get_errno(lgetxattr(p, n, v, arg4));
12115                 }
12116             } else {
12117                 ret = -TARGET_EFAULT;
12118             }
12119             unlock_user(p, arg1, 0);
12120             unlock_user(n, arg2, 0);
12121             unlock_user(v, arg3, arg4);
12122         }
12123         return ret;
12124     case TARGET_NR_fgetxattr:
12125         {
12126             void *n, *v = 0;
12127             if (arg3) {
12128                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12129                 if (!v) {
12130                     return -TARGET_EFAULT;
12131                 }
12132             }
12133             n = lock_user_string(arg2);
12134             if (n) {
12135                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12136             } else {
12137                 ret = -TARGET_EFAULT;
12138             }
12139             unlock_user(n, arg2, 0);
12140             unlock_user(v, arg3, arg4);
12141         }
12142         return ret;
12143     case TARGET_NR_removexattr:
12144     case TARGET_NR_lremovexattr:
12145         {
12146             void *p, *n;
12147             p = lock_user_string(arg1);
12148             n = lock_user_string(arg2);
12149             if (p && n) {
12150                 if (num == TARGET_NR_removexattr) {
12151                     ret = get_errno(removexattr(p, n));
12152                 } else {
12153                     ret = get_errno(lremovexattr(p, n));
12154                 }
12155             } else {
12156                 ret = -TARGET_EFAULT;
12157             }
12158             unlock_user(p, arg1, 0);
12159             unlock_user(n, arg2, 0);
12160         }
12161         return ret;
12162     case TARGET_NR_fremovexattr:
12163         {
12164             void *n;
12165             n = lock_user_string(arg2);
12166             if (n) {
12167                 ret = get_errno(fremovexattr(arg1, n));
12168             } else {
12169                 ret = -TARGET_EFAULT;
12170             }
12171             unlock_user(n, arg2, 0);
12172         }
12173         return ret;
12174 #endif
12175 #endif /* CONFIG_ATTR */
12176 #ifdef TARGET_NR_set_thread_area
12177     case TARGET_NR_set_thread_area:
12178 #if defined(TARGET_MIPS)
12179       cpu_env->active_tc.CP0_UserLocal = arg1;
12180       return 0;
12181 #elif defined(TARGET_CRIS)
12182       if (arg1 & 0xff)
12183           ret = -TARGET_EINVAL;
12184       else {
12185           cpu_env->pregs[PR_PID] = arg1;
12186           ret = 0;
12187       }
12188       return ret;
12189 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12190       return do_set_thread_area(cpu_env, arg1);
12191 #elif defined(TARGET_M68K)
12192       {
12193           TaskState *ts = cpu->opaque;
12194           ts->tp_value = arg1;
12195           return 0;
12196       }
12197 #else
12198       return -TARGET_ENOSYS;
12199 #endif
12200 #endif
12201 #ifdef TARGET_NR_get_thread_area
12202     case TARGET_NR_get_thread_area:
12203 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12204         return do_get_thread_area(cpu_env, arg1);
12205 #elif defined(TARGET_M68K)
12206         {
12207             TaskState *ts = cpu->opaque;
12208             return ts->tp_value;
12209         }
12210 #else
12211         return -TARGET_ENOSYS;
12212 #endif
12213 #endif
12214 #ifdef TARGET_NR_getdomainname
12215     case TARGET_NR_getdomainname:
12216         return -TARGET_ENOSYS;
12217 #endif
12218 
12219 #ifdef TARGET_NR_clock_settime
12220     case TARGET_NR_clock_settime:
12221     {
12222         struct timespec ts;
12223 
12224         ret = target_to_host_timespec(&ts, arg2);
12225         if (!is_error(ret)) {
12226             ret = get_errno(clock_settime(arg1, &ts));
12227         }
12228         return ret;
12229     }
12230 #endif
12231 #ifdef TARGET_NR_clock_settime64
12232     case TARGET_NR_clock_settime64:
12233     {
12234         struct timespec ts;
12235 
12236         ret = target_to_host_timespec64(&ts, arg2);
12237         if (!is_error(ret)) {
12238             ret = get_errno(clock_settime(arg1, &ts));
12239         }
12240         return ret;
12241     }
12242 #endif
12243 #ifdef TARGET_NR_clock_gettime
12244     case TARGET_NR_clock_gettime:
12245     {
12246         struct timespec ts;
12247         ret = get_errno(clock_gettime(arg1, &ts));
12248         if (!is_error(ret)) {
12249             ret = host_to_target_timespec(arg2, &ts);
12250         }
12251         return ret;
12252     }
12253 #endif
12254 #ifdef TARGET_NR_clock_gettime64
12255     case TARGET_NR_clock_gettime64:
12256     {
12257         struct timespec ts;
12258         ret = get_errno(clock_gettime(arg1, &ts));
12259         if (!is_error(ret)) {
12260             ret = host_to_target_timespec64(arg2, &ts);
12261         }
12262         return ret;
12263     }
12264 #endif
12265 #ifdef TARGET_NR_clock_getres
12266     case TARGET_NR_clock_getres:
12267     {
12268         struct timespec ts;
12269         ret = get_errno(clock_getres(arg1, &ts));
12270         if (!is_error(ret)) {
12271             host_to_target_timespec(arg2, &ts);
12272         }
12273         return ret;
12274     }
12275 #endif
12276 #ifdef TARGET_NR_clock_getres_time64
12277     case TARGET_NR_clock_getres_time64:
12278     {
12279         struct timespec ts;
12280         ret = get_errno(clock_getres(arg1, &ts));
12281         if (!is_error(ret)) {
12282             host_to_target_timespec64(arg2, &ts);
12283         }
12284         return ret;
12285     }
12286 #endif
12287 #ifdef TARGET_NR_clock_nanosleep
12288     case TARGET_NR_clock_nanosleep:
12289     {
12290         struct timespec ts;
12291         if (target_to_host_timespec(&ts, arg3)) {
12292             return -TARGET_EFAULT;
12293         }
12294         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12295                                              &ts, arg4 ? &ts : NULL));
12296         /*
12297          * if the call is interrupted by a signal handler, it fails
12298          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12299          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12300          */
12301         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12302             host_to_target_timespec(arg4, &ts)) {
12303               return -TARGET_EFAULT;
12304         }
12305 
12306         return ret;
12307     }
12308 #endif
12309 #ifdef TARGET_NR_clock_nanosleep_time64
12310     case TARGET_NR_clock_nanosleep_time64:
12311     {
12312         struct timespec ts;
12313 
12314         if (target_to_host_timespec64(&ts, arg3)) {
12315             return -TARGET_EFAULT;
12316         }
12317 
12318         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12319                                              &ts, arg4 ? &ts : NULL));
12320 
12321         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12322             host_to_target_timespec64(arg4, &ts)) {
12323             return -TARGET_EFAULT;
12324         }
12325         return ret;
12326     }
12327 #endif
12328 
12329 #if defined(TARGET_NR_set_tid_address)
12330     case TARGET_NR_set_tid_address:
12331     {
12332         TaskState *ts = cpu->opaque;
12333         ts->child_tidptr = arg1;
12334         /* do not call host set_tid_address() syscall, instead return tid() */
12335         return get_errno(sys_gettid());
12336     }
12337 #endif
12338 
12339     case TARGET_NR_tkill:
12340         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12341 
12342     case TARGET_NR_tgkill:
12343         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12344                          target_to_host_signal(arg3)));
12345 
12346 #ifdef TARGET_NR_set_robust_list
12347     case TARGET_NR_set_robust_list:
12348     case TARGET_NR_get_robust_list:
12349         /* The ABI for supporting robust futexes has userspace pass
12350          * the kernel a pointer to a linked list which is updated by
12351          * userspace after the syscall; the list is walked by the kernel
12352          * when the thread exits. Since the linked list in QEMU guest
12353          * memory isn't a valid linked list for the host and we have
12354          * no way to reliably intercept the thread-death event, we can't
12355          * support these. Silently return ENOSYS so that guest userspace
12356          * falls back to a non-robust futex implementation (which should
12357          * be OK except in the corner case of the guest crashing while
12358          * holding a mutex that is shared with another process via
12359          * shared memory).
12360          */
12361         return -TARGET_ENOSYS;
12362 #endif
12363 
12364 #if defined(TARGET_NR_utimensat)
12365     case TARGET_NR_utimensat:
12366         {
12367             struct timespec *tsp, ts[2];
12368             if (!arg3) {
12369                 tsp = NULL;
12370             } else {
12371                 if (target_to_host_timespec(ts, arg3)) {
12372                     return -TARGET_EFAULT;
12373                 }
12374                 if (target_to_host_timespec(ts + 1, arg3 +
12375                                             sizeof(struct target_timespec))) {
12376                     return -TARGET_EFAULT;
12377                 }
12378                 tsp = ts;
12379             }
12380             if (!arg2)
12381                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12382             else {
12383                 if (!(p = lock_user_string(arg2))) {
12384                     return -TARGET_EFAULT;
12385                 }
12386                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12387                 unlock_user(p, arg2, 0);
12388             }
12389         }
12390         return ret;
12391 #endif
12392 #ifdef TARGET_NR_utimensat_time64
12393     case TARGET_NR_utimensat_time64:
12394         {
12395             struct timespec *tsp, ts[2];
12396             if (!arg3) {
12397                 tsp = NULL;
12398             } else {
12399                 if (target_to_host_timespec64(ts, arg3)) {
12400                     return -TARGET_EFAULT;
12401                 }
12402                 if (target_to_host_timespec64(ts + 1, arg3 +
12403                                      sizeof(struct target__kernel_timespec))) {
12404                     return -TARGET_EFAULT;
12405                 }
12406                 tsp = ts;
12407             }
12408             if (!arg2)
12409                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12410             else {
12411                 p = lock_user_string(arg2);
12412                 if (!p) {
12413                     return -TARGET_EFAULT;
12414                 }
12415                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12416                 unlock_user(p, arg2, 0);
12417             }
12418         }
12419         return ret;
12420 #endif
12421 #ifdef TARGET_NR_futex
12422     case TARGET_NR_futex:
12423         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12424 #endif
12425 #ifdef TARGET_NR_futex_time64
12426     case TARGET_NR_futex_time64:
12427         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12428 #endif
12429 #ifdef CONFIG_INOTIFY
12430 #if defined(TARGET_NR_inotify_init)
12431     case TARGET_NR_inotify_init:
12432         ret = get_errno(inotify_init());
12433         if (ret >= 0) {
12434             fd_trans_register(ret, &target_inotify_trans);
12435         }
12436         return ret;
12437 #endif
12438 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12439     case TARGET_NR_inotify_init1:
12440         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12441                                           fcntl_flags_tbl)));
12442         if (ret >= 0) {
12443             fd_trans_register(ret, &target_inotify_trans);
12444         }
12445         return ret;
12446 #endif
12447 #if defined(TARGET_NR_inotify_add_watch)
12448     case TARGET_NR_inotify_add_watch:
12449         p = lock_user_string(arg2);
12450         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12451         unlock_user(p, arg2, 0);
12452         return ret;
12453 #endif
12454 #if defined(TARGET_NR_inotify_rm_watch)
12455     case TARGET_NR_inotify_rm_watch:
12456         return get_errno(inotify_rm_watch(arg1, arg2));
12457 #endif
12458 #endif
12459 
12460 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12461     case TARGET_NR_mq_open:
12462         {
12463             struct mq_attr posix_mq_attr;
12464             struct mq_attr *pposix_mq_attr;
12465             int host_flags;
12466 
12467             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12468             pposix_mq_attr = NULL;
12469             if (arg4) {
12470                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12471                     return -TARGET_EFAULT;
12472                 }
12473                 pposix_mq_attr = &posix_mq_attr;
12474             }
12475             p = lock_user_string(arg1 - 1);
12476             if (!p) {
12477                 return -TARGET_EFAULT;
12478             }
12479             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12480             unlock_user (p, arg1, 0);
12481         }
12482         return ret;
12483 
12484     case TARGET_NR_mq_unlink:
12485         p = lock_user_string(arg1 - 1);
12486         if (!p) {
12487             return -TARGET_EFAULT;
12488         }
12489         ret = get_errno(mq_unlink(p));
12490         unlock_user (p, arg1, 0);
12491         return ret;
12492 
12493 #ifdef TARGET_NR_mq_timedsend
12494     case TARGET_NR_mq_timedsend:
12495         {
12496             struct timespec ts;
12497 
12498             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12499             if (arg5 != 0) {
12500                 if (target_to_host_timespec(&ts, arg5)) {
12501                     return -TARGET_EFAULT;
12502                 }
12503                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12504                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12505                     return -TARGET_EFAULT;
12506                 }
12507             } else {
12508                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12509             }
12510             unlock_user (p, arg2, arg3);
12511         }
12512         return ret;
12513 #endif
12514 #ifdef TARGET_NR_mq_timedsend_time64
12515     case TARGET_NR_mq_timedsend_time64:
12516         {
12517             struct timespec ts;
12518 
12519             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12520             if (arg5 != 0) {
12521                 if (target_to_host_timespec64(&ts, arg5)) {
12522                     return -TARGET_EFAULT;
12523                 }
12524                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12525                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12526                     return -TARGET_EFAULT;
12527                 }
12528             } else {
12529                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12530             }
12531             unlock_user(p, arg2, arg3);
12532         }
12533         return ret;
12534 #endif
12535 
12536 #ifdef TARGET_NR_mq_timedreceive
12537     case TARGET_NR_mq_timedreceive:
12538         {
12539             struct timespec ts;
12540             unsigned int prio;
12541 
12542             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12543             if (arg5 != 0) {
12544                 if (target_to_host_timespec(&ts, arg5)) {
12545                     return -TARGET_EFAULT;
12546                 }
12547                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12548                                                      &prio, &ts));
12549                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12550                     return -TARGET_EFAULT;
12551                 }
12552             } else {
12553                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12554                                                      &prio, NULL));
12555             }
12556             unlock_user (p, arg2, arg3);
12557             if (arg4 != 0)
12558                 put_user_u32(prio, arg4);
12559         }
12560         return ret;
12561 #endif
12562 #ifdef TARGET_NR_mq_timedreceive_time64
12563     case TARGET_NR_mq_timedreceive_time64:
12564         {
12565             struct timespec ts;
12566             unsigned int prio;
12567 
12568             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12569             if (arg5 != 0) {
12570                 if (target_to_host_timespec64(&ts, arg5)) {
12571                     return -TARGET_EFAULT;
12572                 }
12573                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12574                                                      &prio, &ts));
12575                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12576                     return -TARGET_EFAULT;
12577                 }
12578             } else {
12579                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12580                                                      &prio, NULL));
12581             }
12582             unlock_user(p, arg2, arg3);
12583             if (arg4 != 0) {
12584                 put_user_u32(prio, arg4);
12585             }
12586         }
12587         return ret;
12588 #endif
12589 
12590     /* Not implemented for now... */
12591 /*     case TARGET_NR_mq_notify: */
12592 /*         break; */
12593 
12594     case TARGET_NR_mq_getsetattr:
12595         {
12596             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12597             ret = 0;
12598             if (arg2 != 0) {
12599                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12600                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12601                                            &posix_mq_attr_out));
12602             } else if (arg3 != 0) {
12603                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12604             }
12605             if (ret == 0 && arg3 != 0) {
12606                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12607             }
12608         }
12609         return ret;
12610 #endif
12611 
12612 #ifdef CONFIG_SPLICE
12613 #ifdef TARGET_NR_tee
12614     case TARGET_NR_tee:
12615         {
12616             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12617         }
12618         return ret;
12619 #endif
12620 #ifdef TARGET_NR_splice
12621     case TARGET_NR_splice:
12622         {
12623             loff_t loff_in, loff_out;
12624             loff_t *ploff_in = NULL, *ploff_out = NULL;
12625             if (arg2) {
12626                 if (get_user_u64(loff_in, arg2)) {
12627                     return -TARGET_EFAULT;
12628                 }
12629                 ploff_in = &loff_in;
12630             }
12631             if (arg4) {
12632                 if (get_user_u64(loff_out, arg4)) {
12633                     return -TARGET_EFAULT;
12634                 }
12635                 ploff_out = &loff_out;
12636             }
12637             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12638             if (arg2) {
12639                 if (put_user_u64(loff_in, arg2)) {
12640                     return -TARGET_EFAULT;
12641                 }
12642             }
12643             if (arg4) {
12644                 if (put_user_u64(loff_out, arg4)) {
12645                     return -TARGET_EFAULT;
12646                 }
12647             }
12648         }
12649         return ret;
12650 #endif
12651 #ifdef TARGET_NR_vmsplice
12652 	case TARGET_NR_vmsplice:
12653         {
12654             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12655             if (vec != NULL) {
12656                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12657                 unlock_iovec(vec, arg2, arg3, 0);
12658             } else {
12659                 ret = -host_to_target_errno(errno);
12660             }
12661         }
12662         return ret;
12663 #endif
12664 #endif /* CONFIG_SPLICE */
12665 #ifdef CONFIG_EVENTFD
12666 #if defined(TARGET_NR_eventfd)
12667     case TARGET_NR_eventfd:
12668         ret = get_errno(eventfd(arg1, 0));
12669         if (ret >= 0) {
12670             fd_trans_register(ret, &target_eventfd_trans);
12671         }
12672         return ret;
12673 #endif
12674 #if defined(TARGET_NR_eventfd2)
12675     case TARGET_NR_eventfd2:
12676     {
12677         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12678         if (arg2 & TARGET_O_NONBLOCK) {
12679             host_flags |= O_NONBLOCK;
12680         }
12681         if (arg2 & TARGET_O_CLOEXEC) {
12682             host_flags |= O_CLOEXEC;
12683         }
12684         ret = get_errno(eventfd(arg1, host_flags));
12685         if (ret >= 0) {
12686             fd_trans_register(ret, &target_eventfd_trans);
12687         }
12688         return ret;
12689     }
12690 #endif
12691 #endif /* CONFIG_EVENTFD  */
12692 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12693     case TARGET_NR_fallocate:
12694 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12695         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12696                                   target_offset64(arg5, arg6)));
12697 #else
12698         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12699 #endif
12700         return ret;
12701 #endif
12702 #if defined(CONFIG_SYNC_FILE_RANGE)
12703 #if defined(TARGET_NR_sync_file_range)
12704     case TARGET_NR_sync_file_range:
12705 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12706 #if defined(TARGET_MIPS)
12707         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12708                                         target_offset64(arg5, arg6), arg7));
12709 #else
12710         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12711                                         target_offset64(arg4, arg5), arg6));
12712 #endif /* !TARGET_MIPS */
12713 #else
12714         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12715 #endif
12716         return ret;
12717 #endif
12718 #if defined(TARGET_NR_sync_file_range2) || \
12719     defined(TARGET_NR_arm_sync_file_range)
12720 #if defined(TARGET_NR_sync_file_range2)
12721     case TARGET_NR_sync_file_range2:
12722 #endif
12723 #if defined(TARGET_NR_arm_sync_file_range)
12724     case TARGET_NR_arm_sync_file_range:
12725 #endif
12726         /* This is like sync_file_range but the arguments are reordered */
12727 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12728         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12729                                         target_offset64(arg5, arg6), arg2));
12730 #else
12731         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12732 #endif
12733         return ret;
12734 #endif
12735 #endif
12736 #if defined(TARGET_NR_signalfd4)
12737     case TARGET_NR_signalfd4:
12738         return do_signalfd4(arg1, arg2, arg4);
12739 #endif
12740 #if defined(TARGET_NR_signalfd)
12741     case TARGET_NR_signalfd:
12742         return do_signalfd4(arg1, arg2, 0);
12743 #endif
12744 #if defined(CONFIG_EPOLL)
12745 #if defined(TARGET_NR_epoll_create)
12746     case TARGET_NR_epoll_create:
12747         return get_errno(epoll_create(arg1));
12748 #endif
12749 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12750     case TARGET_NR_epoll_create1:
12751         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12752 #endif
12753 #if defined(TARGET_NR_epoll_ctl)
12754     case TARGET_NR_epoll_ctl:
12755     {
12756         struct epoll_event ep;
12757         struct epoll_event *epp = 0;
12758         if (arg4) {
12759             if (arg2 != EPOLL_CTL_DEL) {
12760                 struct target_epoll_event *target_ep;
12761                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12762                     return -TARGET_EFAULT;
12763                 }
12764                 ep.events = tswap32(target_ep->events);
12765                 /*
12766                  * The epoll_data_t union is just opaque data to the kernel,
12767                  * so we transfer all 64 bits across and need not worry what
12768                  * actual data type it is.
12769                  */
12770                 ep.data.u64 = tswap64(target_ep->data.u64);
12771                 unlock_user_struct(target_ep, arg4, 0);
12772             }
12773             /*
12774              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12775              * non-null pointer, even though this argument is ignored.
12776              *
12777              */
12778             epp = &ep;
12779         }
12780         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12781     }
12782 #endif
12783 
12784 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12785 #if defined(TARGET_NR_epoll_wait)
12786     case TARGET_NR_epoll_wait:
12787 #endif
12788 #if defined(TARGET_NR_epoll_pwait)
12789     case TARGET_NR_epoll_pwait:
12790 #endif
12791     {
12792         struct target_epoll_event *target_ep;
12793         struct epoll_event *ep;
12794         int epfd = arg1;
12795         int maxevents = arg3;
12796         int timeout = arg4;
12797 
12798         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12799             return -TARGET_EINVAL;
12800         }
12801 
12802         target_ep = lock_user(VERIFY_WRITE, arg2,
12803                               maxevents * sizeof(struct target_epoll_event), 1);
12804         if (!target_ep) {
12805             return -TARGET_EFAULT;
12806         }
12807 
12808         ep = g_try_new(struct epoll_event, maxevents);
12809         if (!ep) {
12810             unlock_user(target_ep, arg2, 0);
12811             return -TARGET_ENOMEM;
12812         }
12813 
12814         switch (num) {
12815 #if defined(TARGET_NR_epoll_pwait)
12816         case TARGET_NR_epoll_pwait:
12817         {
12818             sigset_t *set = NULL;
12819 
12820             if (arg5) {
12821                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12822                 if (ret != 0) {
12823                     break;
12824                 }
12825             }
12826 
12827             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12828                                              set, SIGSET_T_SIZE));
12829 
12830             if (set) {
12831                 finish_sigsuspend_mask(ret);
12832             }
12833             break;
12834         }
12835 #endif
12836 #if defined(TARGET_NR_epoll_wait)
12837         case TARGET_NR_epoll_wait:
12838             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12839                                              NULL, 0));
12840             break;
12841 #endif
12842         default:
12843             ret = -TARGET_ENOSYS;
12844         }
12845         if (!is_error(ret)) {
12846             int i;
12847             for (i = 0; i < ret; i++) {
12848                 target_ep[i].events = tswap32(ep[i].events);
12849                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12850             }
12851             unlock_user(target_ep, arg2,
12852                         ret * sizeof(struct target_epoll_event));
12853         } else {
12854             unlock_user(target_ep, arg2, 0);
12855         }
12856         g_free(ep);
12857         return ret;
12858     }
12859 #endif
12860 #endif
12861 #ifdef TARGET_NR_prlimit64
12862     case TARGET_NR_prlimit64:
12863     {
12864         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12865         struct target_rlimit64 *target_rnew, *target_rold;
12866         struct host_rlimit64 rnew, rold, *rnewp = 0;
12867         int resource = target_to_host_resource(arg2);
12868 
12869         if (arg3 && (resource != RLIMIT_AS &&
12870                      resource != RLIMIT_DATA &&
12871                      resource != RLIMIT_STACK)) {
12872             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12873                 return -TARGET_EFAULT;
12874             }
12875             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12876             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12877             unlock_user_struct(target_rnew, arg3, 0);
12878             rnewp = &rnew;
12879         }
12880 
12881         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12882         if (!is_error(ret) && arg4) {
12883             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12884                 return -TARGET_EFAULT;
12885             }
12886             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12887             target_rold->rlim_max = tswap64(rold.rlim_max);
12888             unlock_user_struct(target_rold, arg4, 1);
12889         }
12890         return ret;
12891     }
12892 #endif
12893 #ifdef TARGET_NR_gethostname
12894     case TARGET_NR_gethostname:
12895     {
12896         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12897         if (name) {
12898             ret = get_errno(gethostname(name, arg2));
12899             unlock_user(name, arg1, arg2);
12900         } else {
12901             ret = -TARGET_EFAULT;
12902         }
12903         return ret;
12904     }
12905 #endif
12906 #ifdef TARGET_NR_atomic_cmpxchg_32
12907     case TARGET_NR_atomic_cmpxchg_32:
12908     {
12909         /* should use start_exclusive from main.c */
12910         abi_ulong mem_value;
12911         if (get_user_u32(mem_value, arg6)) {
12912             target_siginfo_t info;
12913             info.si_signo = SIGSEGV;
12914             info.si_errno = 0;
12915             info.si_code = TARGET_SEGV_MAPERR;
12916             info._sifields._sigfault._addr = arg6;
12917             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12918             ret = 0xdeadbeef;
12919 
12920         }
12921         if (mem_value == arg2)
12922             put_user_u32(arg1, arg6);
12923         return mem_value;
12924     }
12925 #endif
12926 #ifdef TARGET_NR_atomic_barrier
12927     case TARGET_NR_atomic_barrier:
12928         /* Like the kernel implementation and the
12929            qemu arm barrier, no-op this? */
12930         return 0;
12931 #endif
12932 
12933 #ifdef TARGET_NR_timer_create
12934     case TARGET_NR_timer_create:
12935     {
12936         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12937 
12938         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12939 
12940         int clkid = arg1;
12941         int timer_index = next_free_host_timer();
12942 
12943         if (timer_index < 0) {
12944             ret = -TARGET_EAGAIN;
12945         } else {
12946             timer_t *phtimer = g_posix_timers  + timer_index;
12947 
12948             if (arg2) {
12949                 phost_sevp = &host_sevp;
12950                 ret = target_to_host_sigevent(phost_sevp, arg2);
12951                 if (ret != 0) {
12952                     free_host_timer_slot(timer_index);
12953                     return ret;
12954                 }
12955             }
12956 
12957             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12958             if (ret) {
12959                 free_host_timer_slot(timer_index);
12960             } else {
12961                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12962                     timer_delete(*phtimer);
12963                     free_host_timer_slot(timer_index);
12964                     return -TARGET_EFAULT;
12965                 }
12966             }
12967         }
12968         return ret;
12969     }
12970 #endif
12971 
12972 #ifdef TARGET_NR_timer_settime
12973     case TARGET_NR_timer_settime:
12974     {
12975         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12976          * struct itimerspec * old_value */
12977         target_timer_t timerid = get_timer_id(arg1);
12978 
12979         if (timerid < 0) {
12980             ret = timerid;
12981         } else if (arg3 == 0) {
12982             ret = -TARGET_EINVAL;
12983         } else {
12984             timer_t htimer = g_posix_timers[timerid];
12985             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12986 
12987             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12988                 return -TARGET_EFAULT;
12989             }
12990             ret = get_errno(
12991                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12992             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12993                 return -TARGET_EFAULT;
12994             }
12995         }
12996         return ret;
12997     }
12998 #endif
12999 
13000 #ifdef TARGET_NR_timer_settime64
13001     case TARGET_NR_timer_settime64:
13002     {
13003         target_timer_t timerid = get_timer_id(arg1);
13004 
13005         if (timerid < 0) {
13006             ret = timerid;
13007         } else if (arg3 == 0) {
13008             ret = -TARGET_EINVAL;
13009         } else {
13010             timer_t htimer = g_posix_timers[timerid];
13011             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13012 
13013             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13014                 return -TARGET_EFAULT;
13015             }
13016             ret = get_errno(
13017                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13018             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13019                 return -TARGET_EFAULT;
13020             }
13021         }
13022         return ret;
13023     }
13024 #endif
13025 
13026 #ifdef TARGET_NR_timer_gettime
13027     case TARGET_NR_timer_gettime:
13028     {
13029         /* args: timer_t timerid, struct itimerspec *curr_value */
13030         target_timer_t timerid = get_timer_id(arg1);
13031 
13032         if (timerid < 0) {
13033             ret = timerid;
13034         } else if (!arg2) {
13035             ret = -TARGET_EFAULT;
13036         } else {
13037             timer_t htimer = g_posix_timers[timerid];
13038             struct itimerspec hspec;
13039             ret = get_errno(timer_gettime(htimer, &hspec));
13040 
13041             if (host_to_target_itimerspec(arg2, &hspec)) {
13042                 ret = -TARGET_EFAULT;
13043             }
13044         }
13045         return ret;
13046     }
13047 #endif
13048 
13049 #ifdef TARGET_NR_timer_gettime64
13050     case TARGET_NR_timer_gettime64:
13051     {
13052         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13053         target_timer_t timerid = get_timer_id(arg1);
13054 
13055         if (timerid < 0) {
13056             ret = timerid;
13057         } else if (!arg2) {
13058             ret = -TARGET_EFAULT;
13059         } else {
13060             timer_t htimer = g_posix_timers[timerid];
13061             struct itimerspec hspec;
13062             ret = get_errno(timer_gettime(htimer, &hspec));
13063 
13064             if (host_to_target_itimerspec64(arg2, &hspec)) {
13065                 ret = -TARGET_EFAULT;
13066             }
13067         }
13068         return ret;
13069     }
13070 #endif
13071 
13072 #ifdef TARGET_NR_timer_getoverrun
13073     case TARGET_NR_timer_getoverrun:
13074     {
13075         /* args: timer_t timerid */
13076         target_timer_t timerid = get_timer_id(arg1);
13077 
13078         if (timerid < 0) {
13079             ret = timerid;
13080         } else {
13081             timer_t htimer = g_posix_timers[timerid];
13082             ret = get_errno(timer_getoverrun(htimer));
13083         }
13084         return ret;
13085     }
13086 #endif
13087 
13088 #ifdef TARGET_NR_timer_delete
13089     case TARGET_NR_timer_delete:
13090     {
13091         /* args: timer_t timerid */
13092         target_timer_t timerid = get_timer_id(arg1);
13093 
13094         if (timerid < 0) {
13095             ret = timerid;
13096         } else {
13097             timer_t htimer = g_posix_timers[timerid];
13098             ret = get_errno(timer_delete(htimer));
13099             free_host_timer_slot(timerid);
13100         }
13101         return ret;
13102     }
13103 #endif
13104 
13105 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13106     case TARGET_NR_timerfd_create:
13107         return get_errno(timerfd_create(arg1,
13108                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13109 #endif
13110 
13111 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13112     case TARGET_NR_timerfd_gettime:
13113         {
13114             struct itimerspec its_curr;
13115 
13116             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13117 
13118             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13119                 return -TARGET_EFAULT;
13120             }
13121         }
13122         return ret;
13123 #endif
13124 
13125 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13126     case TARGET_NR_timerfd_gettime64:
13127         {
13128             struct itimerspec its_curr;
13129 
13130             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13131 
13132             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13133                 return -TARGET_EFAULT;
13134             }
13135         }
13136         return ret;
13137 #endif
13138 
13139 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13140     case TARGET_NR_timerfd_settime:
13141         {
13142             struct itimerspec its_new, its_old, *p_new;
13143 
13144             if (arg3) {
13145                 if (target_to_host_itimerspec(&its_new, arg3)) {
13146                     return -TARGET_EFAULT;
13147                 }
13148                 p_new = &its_new;
13149             } else {
13150                 p_new = NULL;
13151             }
13152 
13153             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13154 
13155             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13156                 return -TARGET_EFAULT;
13157             }
13158         }
13159         return ret;
13160 #endif
13161 
13162 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13163     case TARGET_NR_timerfd_settime64:
13164         {
13165             struct itimerspec its_new, its_old, *p_new;
13166 
13167             if (arg3) {
13168                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13169                     return -TARGET_EFAULT;
13170                 }
13171                 p_new = &its_new;
13172             } else {
13173                 p_new = NULL;
13174             }
13175 
13176             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13177 
13178             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13179                 return -TARGET_EFAULT;
13180             }
13181         }
13182         return ret;
13183 #endif
13184 
13185 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13186     case TARGET_NR_ioprio_get:
13187         return get_errno(ioprio_get(arg1, arg2));
13188 #endif
13189 
13190 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13191     case TARGET_NR_ioprio_set:
13192         return get_errno(ioprio_set(arg1, arg2, arg3));
13193 #endif
13194 
13195 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13196     case TARGET_NR_setns:
13197         return get_errno(setns(arg1, arg2));
13198 #endif
13199 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13200     case TARGET_NR_unshare:
13201         return get_errno(unshare(arg1));
13202 #endif
13203 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13204     case TARGET_NR_kcmp:
13205         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13206 #endif
13207 #ifdef TARGET_NR_swapcontext
13208     case TARGET_NR_swapcontext:
13209         /* PowerPC specific.  */
13210         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13211 #endif
13212 #ifdef TARGET_NR_memfd_create
13213     case TARGET_NR_memfd_create:
13214         p = lock_user_string(arg1);
13215         if (!p) {
13216             return -TARGET_EFAULT;
13217         }
13218         ret = get_errno(memfd_create(p, arg2));
13219         fd_trans_unregister(ret);
13220         unlock_user(p, arg1, 0);
13221         return ret;
13222 #endif
13223 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13224     case TARGET_NR_membarrier:
13225         return get_errno(membarrier(arg1, arg2));
13226 #endif
13227 
13228 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13229     case TARGET_NR_copy_file_range:
13230         {
13231             loff_t inoff, outoff;
13232             loff_t *pinoff = NULL, *poutoff = NULL;
13233 
13234             if (arg2) {
13235                 if (get_user_u64(inoff, arg2)) {
13236                     return -TARGET_EFAULT;
13237                 }
13238                 pinoff = &inoff;
13239             }
13240             if (arg4) {
13241                 if (get_user_u64(outoff, arg4)) {
13242                     return -TARGET_EFAULT;
13243                 }
13244                 poutoff = &outoff;
13245             }
13246             /* Do not sign-extend the count parameter. */
13247             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13248                                                  (abi_ulong)arg5, arg6));
13249             if (!is_error(ret) && ret > 0) {
13250                 if (arg2) {
13251                     if (put_user_u64(inoff, arg2)) {
13252                         return -TARGET_EFAULT;
13253                     }
13254                 }
13255                 if (arg4) {
13256                     if (put_user_u64(outoff, arg4)) {
13257                         return -TARGET_EFAULT;
13258                     }
13259                 }
13260             }
13261         }
13262         return ret;
13263 #endif
13264 
13265 #if defined(TARGET_NR_pivot_root)
13266     case TARGET_NR_pivot_root:
13267         {
13268             void *p2;
13269             p = lock_user_string(arg1); /* new_root */
13270             p2 = lock_user_string(arg2); /* put_old */
13271             if (!p || !p2) {
13272                 ret = -TARGET_EFAULT;
13273             } else {
13274                 ret = get_errno(pivot_root(p, p2));
13275             }
13276             unlock_user(p2, arg2, 0);
13277             unlock_user(p, arg1, 0);
13278         }
13279         return ret;
13280 #endif
13281 
13282     default:
13283         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13284         return -TARGET_ENOSYS;
13285     }
13286     return ret;
13287 }
13288 
13289 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13290                     abi_long arg2, abi_long arg3, abi_long arg4,
13291                     abi_long arg5, abi_long arg6, abi_long arg7,
13292                     abi_long arg8)
13293 {
13294     CPUState *cpu = env_cpu(cpu_env);
13295     abi_long ret;
13296 
13297 #ifdef DEBUG_ERESTARTSYS
13298     /* Debug-only code for exercising the syscall-restart code paths
13299      * in the per-architecture cpu main loops: restart every syscall
13300      * the guest makes once before letting it through.
13301      */
13302     {
13303         static bool flag;
13304         flag = !flag;
13305         if (flag) {
13306             return -QEMU_ERESTARTSYS;
13307         }
13308     }
13309 #endif
13310 
13311     record_syscall_start(cpu, num, arg1,
13312                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13313 
13314     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13315         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13316     }
13317 
13318     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13319                       arg5, arg6, arg7, arg8);
13320 
13321     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13322         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13323                           arg3, arg4, arg5, arg6);
13324     }
13325 
13326     record_syscall_return(cpu, num, ret);
13327     return ret;
13328 }
13329