xref: /openbmc/qemu/linux-user/syscall.c (revision 6c3a9247)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
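/* For illustration: a glibc pthread_create() typically issues clone() with
 * a flag set along the lines of
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS, nothing beyond that outside
 * CLONE_OPTIONAL_THREAD_FLAGS / CLONE_IGNORED_FLAGS, and therefore no bit
 * of CLONE_INVALID_THREAD_FLAGS, so it is accepted on the thread-creation
 * path.  A plain fork()-style clone() passes none of CLONE_THREAD_FLAGS
 * and is checked against CLONE_INVALID_FORK_FLAGS instead.  (Exact flag
 * sets vary between libc versions.)
 */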
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
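/* For illustration, a declaration such as
 *     _syscall3(int, sys_syslog, int, type, char *, bufp, int, len)
 * (used further down) expands to roughly
 *     static int sys_syslog(int type, char *bufp, int len)
 *     {
 *         return syscall(__NR_sys_syslog, type, bufp, len);
 *     }
 * i.e. a thin static wrapper that invokes the raw host syscall number
 * directly, bypassing any libc wrapper for that call.
 */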
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate the guest getdents with the host getdents, if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
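/* For example, a 64-bit guest on a 32-bit host has
 * TARGET_ABI_BITS > HOST_LONG_BITS, so EMULATE_GETDENTS_WITH_GETDENTS
 * stays undefined and getdents is emulated via getdents64, whose record
 * layout does not depend on the host word size.  A 64-bit guest on a
 * 64-bit host, or a 32-bit guest on any host, can be served from the
 * host getdents (when the host still provides __NR_getdents), because
 * the records can only shrink or stay the same size after conversion.
 */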
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 /* sched_attr is not defined in glibc */
341 struct sched_attr {
342     uint32_t size;
343     uint32_t sched_policy;
344     uint64_t sched_flags;
345     int32_t sched_nice;
346     uint32_t sched_priority;
347     uint64_t sched_runtime;
348     uint64_t sched_deadline;
349     uint64_t sched_period;
350     uint32_t sched_util_min;
351     uint32_t sched_util_max;
352 };
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, flags);
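/* Illustrative use of the two wrappers above: sched_getattr() takes the
 * size of the caller's buffer and a flags argument that must currently
 * be zero, e.g.
 *     struct sched_attr attr;
 *     int err = sys_sched_getattr(getpid(), &attr, sizeof(attr), 0);
 * whereas sched_setattr() expects the caller to have filled in attr.size
 * (and the other fields) before the call.
 */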
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363           const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366           struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369           const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373           void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375           struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377           struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387 
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390           unsigned long, idx1, unsigned long, idx2)
391 #endif
392 
393 /*
394  * It is assumed that struct statx is architecture independent.
395  */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398           unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403 
404 static const bitmask_transtbl fcntl_flags_tbl[] = {
405   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
406   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
407   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
408   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
409   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
410   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
411   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
412   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
413   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
414   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
415   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
416   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
417   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
418 #if defined(O_DIRECT)
419   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
420 #endif
421 #if defined(O_NOATIME)
422   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
423 #endif
424 #if defined(O_CLOEXEC)
425   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
426 #endif
427 #if defined(O_PATH)
428   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
429 #endif
430 #if defined(O_TMPFILE)
431   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
432 #endif
433   /* Don't terminate the list prematurely on 64-bit host+guest.  */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
436 #endif
437   { 0, 0, 0, 0 }
438 };
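/* Illustrative reading of the table above: each bitmask_transtbl entry
 * is { target_mask, target_bits, host_mask, host_bits }, so the
 * TARGET_O_NONBLOCK row means "if the bits selected by TARGET_O_NONBLOCK
 * in the guest flags equal TARGET_O_NONBLOCK, set O_NONBLOCK in the host
 * flags", and the reverse mapping is used for host-to-target conversion.
 * The O_LARGEFILE row is guarded because on a 64-bit host and guest both
 * constants can be 0, which would make that row identical to the
 * all-zero terminator and cut the table short.
 */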
439 
440 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
441 
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
446           const struct timespec *,tsp,int,flags)
447 #else
448 static int sys_utimensat(int dirfd, const char *pathname,
449                          const struct timespec times[2], int flags)
450 {
451     errno = ENOSYS;
452     return -1;
453 }
454 #endif
455 #endif /* TARGET_NR_utimensat */
456 
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
461           const char *, new, unsigned int, flags)
462 #else
463 static int sys_renameat2(int oldfd, const char *old,
464                          int newfd, const char *new, int flags)
465 {
466     if (flags == 0) {
467         return renameat(oldfd, old, newfd, new);
468     }
469     errno = ENOSYS;
470     return -1;
471 }
472 #endif
473 #endif /* TARGET_NR_renameat2 */
474 
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be the one used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, };
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
518 
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
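/* For illustration, each E(X) entry in errnos.c.inc expands to
 *     case X:        return TARGET_X;    in host_to_target_errno()
 *     case TARGET_X: return X;           in target_to_host_errno()
 * so both conversion directions are generated from a single list and
 * cannot drift apart; errnos missing from that list fall through the
 * default case and are passed back unchanged.
 */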
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563     int i;
564     uint8_t b;
565     if (usize <= ksize) {
566         return 1;
567     }
568     for (i = ksize; i < usize; i++) {
569         if (get_user_u8(b, addr + i)) {
570             return -TARGET_EFAULT;
571         }
572         if (b != 0) {
573             return 0;
574         }
575     }
576     return 1;
577 }
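/* Illustrative use: for an extensible struct such as sched_attr, the
 * guest may pass a usize larger than the ksize this build of QEMU knows
 * about.  check_zeroed_user() then returns 1 if every byte in the
 * trailing [ksize, usize) range is zero (the unknown extension can be
 * safely ignored), 0 if any byte is non-zero (the guest requested
 * semantics we do not understand), or -TARGET_EFAULT if the range
 * cannot be read at all.
 */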
578 
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
581 { \
582     return safe_syscall(__NR_##name); \
583 }
584 
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
587 { \
588     return safe_syscall(__NR_##name, arg1); \
589 }
590 
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2); \
595 }
596 
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
599 { \
600     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
601 }
602 
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
604     type4, arg4) \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
606 { \
607     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
608 }
609 
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611     type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
613     type5 arg5) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
616 }
617 
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619     type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621     type5 arg5, type6 arg6) \
622 { \
623     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
624 }
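/* For illustration,
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 * (declared just below) expands to roughly
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 * which differs from the plain _syscallN wrappers above only in routing
 * the call through safe_syscall(), so that a guest signal arriving while
 * the host call is blocked can be handled and the syscall restarted
 * correctly.
 */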
625 
626 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
627 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
628 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
629               int, flags, mode_t, mode)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
632               struct rusage *, rusage)
633 #endif
634 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
635               int, options, struct rusage *, rusage)
636 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
640               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
641 #endif
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
644               struct timespec *, tsp, const sigset_t *, sigmask,
645               size_t, sigsetsize)
646 #endif
647 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
648               int, maxevents, int, timeout, const sigset_t *, sigmask,
649               size_t, sigsetsize)
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
652               const struct timespec *,timeout,int *,uaddr2,int,val3)
653 #endif
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
656               const struct timespec *,timeout,int *,uaddr2,int,val3)
657 #endif
658 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
659 safe_syscall2(int, kill, pid_t, pid, int, sig)
660 safe_syscall2(int, tkill, int, tid, int, sig)
661 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
662 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
663 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
664 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
665               unsigned long, pos_l, unsigned long, pos_h)
666 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
667               unsigned long, pos_l, unsigned long, pos_h)
668 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
669               socklen_t, addrlen)
670 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
671               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
672 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
673               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
674 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
675 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
676 safe_syscall2(int, flock, int, fd, int, operation)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
679               const struct timespec *, uts, size_t, sigsetsize)
680 #endif
681 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
682               int, flags)
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep, const struct timespec *, req,
685               struct timespec *, rem)
686 #endif
687 #if defined(TARGET_NR_clock_nanosleep) || \
688     defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
690               const struct timespec *, req, struct timespec *, rem)
691 #endif
692 #ifdef __NR_ipc
693 #ifdef __s390x__
694 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr)
696 #else
697 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
698               void *, ptr, long, fifth)
699 #endif
700 #endif
701 #ifdef __NR_msgsnd
702 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
703               int, flags)
704 #endif
705 #ifdef __NR_msgrcv
706 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
707               long, msgtype, int, flags)
708 #endif
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
711               unsigned, nsops, const struct timespec *, timeout)
712 #endif
713 #if defined(TARGET_NR_mq_timedsend) || \
714     defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
716               size_t, len, unsigned, prio, const struct timespec *, timeout)
717 #endif
718 #if defined(TARGET_NR_mq_timedreceive) || \
719     defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
721               size_t, len, unsigned *, prio, const struct timespec *, timeout)
722 #endif
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
725               int, outfd, loff_t *, poutoff, size_t, length,
726               unsigned int, flags)
727 #endif
728 
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730  * "third argument might be integer or pointer or not present" behaviour of
731  * the libc function.
732  */
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736  *  use the flock64 struct rather than unsuffixed flock
737  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
738  */
739 #ifdef __NR_fcntl64
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
741 #else
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
743 #endif
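/* For example (illustrative), taking a read lock through this wrapper
 * would look like
 *     struct flock64 fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_SETLK64, &fl));
 * using F_SETLK64 and struct flock64 rather than F_SETLK and struct
 * flock, so the 64-bit-offset layout is used whether the host is 32-bit
 * or 64-bit.
 */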
744 
745 static inline int host_to_target_sock_type(int host_type)
746 {
747     int target_type;
748 
749     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750     case SOCK_DGRAM:
751         target_type = TARGET_SOCK_DGRAM;
752         break;
753     case SOCK_STREAM:
754         target_type = TARGET_SOCK_STREAM;
755         break;
756     default:
757         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758         break;
759     }
760 
761 #if defined(SOCK_CLOEXEC)
762     if (host_type & SOCK_CLOEXEC) {
763         target_type |= TARGET_SOCK_CLOEXEC;
764     }
765 #endif
766 
767 #if defined(SOCK_NONBLOCK)
768     if (host_type & SOCK_NONBLOCK) {
769         target_type |= TARGET_SOCK_NONBLOCK;
770     }
771 #endif
772 
773     return target_type;
774 }
775 
776 static abi_ulong target_brk;
777 static abi_ulong target_original_brk;
778 static abi_ulong brk_page;
779 
780 void target_set_brk(abi_ulong new_brk)
781 {
782     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
783     brk_page = HOST_PAGE_ALIGN(target_brk);
784 }
785 
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
788 
789 /* do_brk() must return target values and target errnos. */
790 abi_long do_brk(abi_ulong new_brk)
791 {
792     abi_long mapped_addr;
793     abi_ulong new_alloc_size;
794 
795     /* brk pointers are always untagged */
796 
797     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
798 
799     if (!new_brk) {
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
801         return target_brk;
802     }
803     if (new_brk < target_original_brk) {
804         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
805                    target_brk);
806         return target_brk;
807     }
808 
809     /* If the new brk is less than the highest page reserved to the
810      * target heap allocation, set it and we're almost done...  */
811     if (new_brk <= brk_page) {
812         /* Heap contents are initialized to zero, as for anonymous
813          * mapped pages.  */
814         if (new_brk > target_brk) {
815             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
816         }
817         target_brk = new_brk;
818         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
819         return target_brk;
820     }
821 
822     /* We need to allocate more memory after the brk... Note that
823      * we don't use MAP_FIXED because that will map over the top of
824      * any existing mapping (like the one with the host libc or qemu
825      * itself); instead we treat "mapped but at wrong address" as
826      * a failure and unmap again.
827      */
828     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
829     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
830                                         PROT_READ|PROT_WRITE,
831                                         MAP_ANON|MAP_PRIVATE, 0, 0));
832 
833     if (mapped_addr == brk_page) {
834         /* Heap contents are initialized to zero, as for anonymous
835          * mapped pages.  Technically the new pages are already
836          * initialized to zero since they *are* anonymous mapped
837          * pages, however we have to take care with the contents that
838  * come from the remaining part of the previous page: it may
839  * contain garbage data left over from previous heap usage (the
840  * heap may have grown and then shrunk).  */
841         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
842 
843         target_brk = new_brk;
844         brk_page = HOST_PAGE_ALIGN(target_brk);
845         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
846             target_brk);
847         return target_brk;
848     } else if (mapped_addr != -1) {
849         /* Mapped but at wrong address, meaning there wasn't actually
850          * enough space for this brk.
851          */
852         target_munmap(mapped_addr, new_alloc_size);
853         mapped_addr = -1;
854         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
855     }
856     else {
857         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
858     }
859 
860 #if defined(TARGET_ALPHA)
861     /* We (partially) emulate OSF/1 on Alpha, which requires we
862        return a proper errno, not an unchanged brk value.  */
863     return -TARGET_ENOMEM;
864 #endif
865     /* For everything else, return the previous break. */
866     return target_brk;
867 }
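/* Illustrative behaviour of do_brk(): a guest brk(0) simply reports the
 * current break; a request below the original break returns the break
 * unchanged; growing within the already-reserved brk_page only zeroes
 * the newly exposed bytes; and growing past brk_page tries to map fresh
 * anonymous pages immediately after it, falling back to returning the
 * old break (or -TARGET_ENOMEM for the Alpha/OSF/1 emulation) when that
 * address range is not actually free.
 */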
868 
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872                                             abi_ulong target_fds_addr,
873                                             int n)
874 {
875     int i, nw, j, k;
876     abi_ulong b, *target_fds;
877 
878     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879     if (!(target_fds = lock_user(VERIFY_READ,
880                                  target_fds_addr,
881                                  sizeof(abi_ulong) * nw,
882                                  1)))
883         return -TARGET_EFAULT;
884 
885     FD_ZERO(fds);
886     k = 0;
887     for (i = 0; i < nw; i++) {
888         /* grab the abi_ulong */
889         __get_user(b, &target_fds[i]);
890         for (j = 0; j < TARGET_ABI_BITS; j++) {
891             /* check the bit inside the abi_ulong */
892             if ((b >> j) & 1)
893                 FD_SET(k, fds);
894             k++;
895         }
896     }
897 
898     unlock_user(target_fds, target_fds_addr, 0);
899 
900     return 0;
901 }
902 
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904                                                  abi_ulong target_fds_addr,
905                                                  int n)
906 {
907     if (target_fds_addr) {
908         if (copy_from_user_fdset(fds, target_fds_addr, n))
909             return -TARGET_EFAULT;
910         *fds_ptr = fds;
911     } else {
912         *fds_ptr = NULL;
913     }
914     return 0;
915 }
916 
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918                                           const fd_set *fds,
919                                           int n)
920 {
921     int i, nw, j, k;
922     abi_long v;
923     abi_ulong *target_fds;
924 
925     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926     if (!(target_fds = lock_user(VERIFY_WRITE,
927                                  target_fds_addr,
928                                  sizeof(abi_ulong) * nw,
929                                  0)))
930         return -TARGET_EFAULT;
931 
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         v = 0;
935         for (j = 0; j < TARGET_ABI_BITS; j++) {
936             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937             k++;
938         }
939         __put_user(v, &target_fds[i]);
940     }
941 
942     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943 
944     return 0;
945 }
946 #endif
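/* Illustrative layout handled above: the guest fd_set is an array of
 * abi_ulong words, with fd N stored in bit (N % TARGET_ABI_BITS) of
 * word (N / TARGET_ABI_BITS), in the guest's byte order.  With
 * TARGET_ABI_BITS == 32, for instance, fd 33 lives in bit 1 of the
 * second word, which the loops above translate to and from the host's
 * FD_SET()/FD_ISSET() representation one bit at a time.
 */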
947 
948 #if defined(__alpha__)
949 #define HOST_HZ 1024
950 #else
951 #define HOST_HZ 100
952 #endif
953 
954 static inline abi_long host_to_target_clock_t(long ticks)
955 {
956 #if HOST_HZ == TARGET_HZ
957     return ticks;
958 #else
959     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
960 #endif
961 }
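/* Worked example (illustrative): on an Alpha host (HOST_HZ == 1024)
 * running a target whose TARGET_HZ is 100, 2048 host ticks are reported
 * to the guest as (2048 * 100) / 1024 = 200 target ticks, i.e. the same
 * two seconds of CPU time expressed in the guest's tick unit.
 */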
962 
963 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
964                                              const struct rusage *rusage)
965 {
966     struct target_rusage *target_rusage;
967 
968     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
969         return -TARGET_EFAULT;
970     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
971     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
972     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
973     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
974     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
975     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
976     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
977     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
978     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
979     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
980     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
981     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
982     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
983     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
984     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
985     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
986     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
987     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
988     unlock_user_struct(target_rusage, target_addr, 1);
989 
990     return 0;
991 }
992 
993 #ifdef TARGET_NR_setrlimit
994 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
995 {
996     abi_ulong target_rlim_swap;
997     rlim_t result;
998 
999     target_rlim_swap = tswapal(target_rlim);
1000     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1001         return RLIM_INFINITY;
1002 
1003     result = target_rlim_swap;
1004     if (target_rlim_swap != (rlim_t)result)
1005         return RLIM_INFINITY;
1006 
1007     return result;
1008 }
1009 #endif
1010 
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1013 {
1014     abi_ulong target_rlim_swap;
1015     abi_ulong result;
1016 
1017     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1018         target_rlim_swap = TARGET_RLIM_INFINITY;
1019     else
1020         target_rlim_swap = rlim;
1021     result = tswapal(target_rlim_swap);
1022 
1023     return result;
1024 }
1025 #endif
1026 
1027 static inline int target_to_host_resource(int code)
1028 {
1029     switch (code) {
1030     case TARGET_RLIMIT_AS:
1031         return RLIMIT_AS;
1032     case TARGET_RLIMIT_CORE:
1033         return RLIMIT_CORE;
1034     case TARGET_RLIMIT_CPU:
1035         return RLIMIT_CPU;
1036     case TARGET_RLIMIT_DATA:
1037         return RLIMIT_DATA;
1038     case TARGET_RLIMIT_FSIZE:
1039         return RLIMIT_FSIZE;
1040     case TARGET_RLIMIT_LOCKS:
1041         return RLIMIT_LOCKS;
1042     case TARGET_RLIMIT_MEMLOCK:
1043         return RLIMIT_MEMLOCK;
1044     case TARGET_RLIMIT_MSGQUEUE:
1045         return RLIMIT_MSGQUEUE;
1046     case TARGET_RLIMIT_NICE:
1047         return RLIMIT_NICE;
1048     case TARGET_RLIMIT_NOFILE:
1049         return RLIMIT_NOFILE;
1050     case TARGET_RLIMIT_NPROC:
1051         return RLIMIT_NPROC;
1052     case TARGET_RLIMIT_RSS:
1053         return RLIMIT_RSS;
1054     case TARGET_RLIMIT_RTPRIO:
1055         return RLIMIT_RTPRIO;
1056     case TARGET_RLIMIT_RTTIME:
1057         return RLIMIT_RTTIME;
1058     case TARGET_RLIMIT_SIGPENDING:
1059         return RLIMIT_SIGPENDING;
1060     case TARGET_RLIMIT_STACK:
1061         return RLIMIT_STACK;
1062     default:
1063         return code;
1064     }
1065 }
1066 
1067 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1068                                               abi_ulong target_tv_addr)
1069 {
1070     struct target_timeval *target_tv;
1071 
1072     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1073         return -TARGET_EFAULT;
1074     }
1075 
1076     __get_user(tv->tv_sec, &target_tv->tv_sec);
1077     __get_user(tv->tv_usec, &target_tv->tv_usec);
1078 
1079     unlock_user_struct(target_tv, target_tv_addr, 0);
1080 
1081     return 0;
1082 }
1083 
1084 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1085                                             const struct timeval *tv)
1086 {
1087     struct target_timeval *target_tv;
1088 
1089     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1090         return -TARGET_EFAULT;
1091     }
1092 
1093     __put_user(tv->tv_sec, &target_tv->tv_sec);
1094     __put_user(tv->tv_usec, &target_tv->tv_usec);
1095 
1096     unlock_user_struct(target_tv, target_tv_addr, 1);
1097 
1098     return 0;
1099 }
1100 
1101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1102 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1103                                                 abi_ulong target_tv_addr)
1104 {
1105     struct target__kernel_sock_timeval *target_tv;
1106 
1107     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1108         return -TARGET_EFAULT;
1109     }
1110 
1111     __get_user(tv->tv_sec, &target_tv->tv_sec);
1112     __get_user(tv->tv_usec, &target_tv->tv_usec);
1113 
1114     unlock_user_struct(target_tv, target_tv_addr, 0);
1115 
1116     return 0;
1117 }
1118 #endif
1119 
1120 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1121                                               const struct timeval *tv)
1122 {
1123     struct target__kernel_sock_timeval *target_tv;
1124 
1125     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1126         return -TARGET_EFAULT;
1127     }
1128 
1129     __put_user(tv->tv_sec, &target_tv->tv_sec);
1130     __put_user(tv->tv_usec, &target_tv->tv_usec);
1131 
1132     unlock_user_struct(target_tv, target_tv_addr, 1);
1133 
1134     return 0;
1135 }
1136 
1137 #if defined(TARGET_NR_futex) || \
1138     defined(TARGET_NR_rt_sigtimedwait) || \
1139     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1140     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1141     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1142     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1143     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1144     defined(TARGET_NR_timer_settime) || \
1145     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1146 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1147                                                abi_ulong target_addr)
1148 {
1149     struct target_timespec *target_ts;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1155     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1156     unlock_user_struct(target_ts, target_addr, 0);
1157     return 0;
1158 }
1159 #endif
1160 
1161 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1162     defined(TARGET_NR_timer_settime64) || \
1163     defined(TARGET_NR_mq_timedsend_time64) || \
1164     defined(TARGET_NR_mq_timedreceive_time64) || \
1165     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1166     defined(TARGET_NR_clock_nanosleep_time64) || \
1167     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1168     defined(TARGET_NR_utimensat) || \
1169     defined(TARGET_NR_utimensat_time64) || \
1170     defined(TARGET_NR_semtimedop_time64) || \
1171     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1172 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1173                                                  abi_ulong target_addr)
1174 {
1175     struct target__kernel_timespec *target_ts;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1181     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1182     /* in 32bit mode, this drops the padding */
1183     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1184     unlock_user_struct(target_ts, target_addr, 0);
1185     return 0;
1186 }
1187 #endif
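/* Note on the cast above: target__kernel_timespec carries tv_nsec as a
 * 64-bit field even for 32-bit guests, so the value is first narrowed
 * to abi_long (32 bits on such guests) and then widened back to the
 * host long, discarding whatever the guest left in the upper padding
 * bits.
 */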
1188 
1189 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1190                                                struct timespec *host_ts)
1191 {
1192     struct target_timespec *target_ts;
1193 
1194     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1195         return -TARGET_EFAULT;
1196     }
1197     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1198     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199     unlock_user_struct(target_ts, target_addr, 1);
1200     return 0;
1201 }
1202 
1203 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1204                                                  struct timespec *host_ts)
1205 {
1206     struct target__kernel_timespec *target_ts;
1207 
1208     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1209         return -TARGET_EFAULT;
1210     }
1211     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1212     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213     unlock_user_struct(target_ts, target_addr, 1);
1214     return 0;
1215 }
1216 
1217 #if defined(TARGET_NR_gettimeofday)
1218 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1219                                              struct timezone *tz)
1220 {
1221     struct target_timezone *target_tz;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1228     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1229 
1230     unlock_user_struct(target_tz, target_tz_addr, 1);
1231 
1232     return 0;
1233 }
1234 #endif
1235 
1236 #if defined(TARGET_NR_settimeofday)
1237 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1238                                                abi_ulong target_tz_addr)
1239 {
1240     struct target_timezone *target_tz;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245 
1246     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1247     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1248 
1249     unlock_user_struct(target_tz, target_tz_addr, 0);
1250 
1251     return 0;
1252 }
1253 #endif
1254 
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1256 #include <mqueue.h>
1257 
1258 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1259                                               abi_ulong target_mq_attr_addr)
1260 {
1261     struct target_mq_attr *target_mq_attr;
1262 
1263     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1264                           target_mq_attr_addr, 1))
1265         return -TARGET_EFAULT;
1266 
1267     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1268     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1269     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1270     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1271 
1272     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1273 
1274     return 0;
1275 }
1276 
1277 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1278                                             const struct mq_attr *attr)
1279 {
1280     struct target_mq_attr *target_mq_attr;
1281 
1282     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1283                           target_mq_attr_addr, 0))
1284         return -TARGET_EFAULT;
1285 
1286     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1287     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1288     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1289     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1290 
1291     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1292 
1293     return 0;
1294 }
1295 #endif
1296 
1297 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1298 /* do_select() must return target values and target errnos. */
1299 static abi_long do_select(int n,
1300                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1301                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1302 {
1303     fd_set rfds, wfds, efds;
1304     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1305     struct timeval tv;
1306     struct timespec ts, *ts_ptr;
1307     abi_long ret;
1308 
1309     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1310     if (ret) {
1311         return ret;
1312     }
1313     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1314     if (ret) {
1315         return ret;
1316     }
1317     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1318     if (ret) {
1319         return ret;
1320     }
1321 
1322     if (target_tv_addr) {
1323         if (copy_from_user_timeval(&tv, target_tv_addr))
1324             return -TARGET_EFAULT;
1325         ts.tv_sec = tv.tv_sec;
1326         ts.tv_nsec = tv.tv_usec * 1000;
1327         ts_ptr = &ts;
1328     } else {
1329         ts_ptr = NULL;
1330     }
1331 
1332     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1333                                   ts_ptr, NULL));
1334 
1335     if (!is_error(ret)) {
1336         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1337             return -TARGET_EFAULT;
1338         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1339             return -TARGET_EFAULT;
1340         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1341             return -TARGET_EFAULT;
1342 
1343         if (target_tv_addr) {
1344             tv.tv_sec = ts.tv_sec;
1345             tv.tv_usec = ts.tv_nsec / 1000;
1346             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1347                 return -TARGET_EFAULT;
1348             }
1349         }
1350     }
1351 
1352     return ret;
1353 }
1354 
1355 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1356 static abi_long do_old_select(abi_ulong arg1)
1357 {
1358     struct target_sel_arg_struct *sel;
1359     abi_ulong inp, outp, exp, tvp;
1360     long nsel;
1361 
1362     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1363         return -TARGET_EFAULT;
1364     }
1365 
1366     nsel = tswapal(sel->n);
1367     inp = tswapal(sel->inp);
1368     outp = tswapal(sel->outp);
1369     exp = tswapal(sel->exp);
1370     tvp = tswapal(sel->tvp);
1371 
1372     unlock_user_struct(sel, arg1, 0);
1373 
1374     return do_select(nsel, inp, outp, exp, tvp);
1375 }
1376 #endif
1377 #endif
1378 
1379 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1380 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1381                             abi_long arg4, abi_long arg5, abi_long arg6,
1382                             bool time64)
1383 {
1384     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1385     fd_set rfds, wfds, efds;
1386     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1387     struct timespec ts, *ts_ptr;
1388     abi_long ret;
1389 
1390     /*
1391      * The 6th arg is actually two args smashed together,
1392      * so we cannot use the C library.
1393      */
1394     sigset_t set;
1395     struct {
1396         sigset_t *set;
1397         size_t size;
1398     } sig, *sig_ptr;
1399 
1400     abi_ulong arg_sigset, arg_sigsize, *arg7;
1401     target_sigset_t *target_sigset;
1402 
1403     n = arg1;
1404     rfd_addr = arg2;
1405     wfd_addr = arg3;
1406     efd_addr = arg4;
1407     ts_addr = arg5;
1408 
1409     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1410     if (ret) {
1411         return ret;
1412     }
1413     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1414     if (ret) {
1415         return ret;
1416     }
1417     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1418     if (ret) {
1419         return ret;
1420     }
1421 
1422     /*
1423      * This takes a timespec, and not a timeval, so we cannot
1424      * use the do_select() helper ...
1425      */
1426     if (ts_addr) {
1427         if (time64) {
1428             if (target_to_host_timespec64(&ts, ts_addr)) {
1429                 return -TARGET_EFAULT;
1430             }
1431         } else {
1432             if (target_to_host_timespec(&ts, ts_addr)) {
1433                 return -TARGET_EFAULT;
1434             }
1435         }
1436         ts_ptr = &ts;
1437     } else {
1438         ts_ptr = NULL;
1439     }
1440 
1441     /* Extract the two packed args for the sigset */
1442     if (arg6) {
1443         sig_ptr = &sig;
1444         sig.size = SIGSET_T_SIZE;
1445 
1446         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1447         if (!arg7) {
1448             return -TARGET_EFAULT;
1449         }
1450         arg_sigset = tswapal(arg7[0]);
1451         arg_sigsize = tswapal(arg7[1]);
1452         unlock_user(arg7, arg6, 0);
1453 
1454         if (arg_sigset) {
1455             sig.set = &set;
1456             if (arg_sigsize != sizeof(*target_sigset)) {
1457                 /* Like the kernel, we enforce correct size sigsets */
1458                 return -TARGET_EINVAL;
1459             }
1460             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1461                                       sizeof(*target_sigset), 1);
1462             if (!target_sigset) {
1463                 return -TARGET_EFAULT;
1464             }
1465             target_to_host_sigset(&set, target_sigset);
1466             unlock_user(target_sigset, arg_sigset, 0);
1467         } else {
1468             sig.set = NULL;
1469         }
1470     } else {
1471         sig_ptr = NULL;
1472     }
1473 
1474     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1475                                   ts_ptr, sig_ptr));
1476 
1477     if (!is_error(ret)) {
1478         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1479             return -TARGET_EFAULT;
1480         }
1481         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1482             return -TARGET_EFAULT;
1483         }
1484         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1485             return -TARGET_EFAULT;
1486         }
1487         if (time64) {
1488             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1489                 return -TARGET_EFAULT;
1490             }
1491         } else {
1492             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1493                 return -TARGET_EFAULT;
1494             }
1495         }
1496     }
1497     return ret;
1498 }
1499 #endif
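/* Note on do_pselect6() above: the guest's sixth argument is not a
 * sigset pointer but a pointer to two packed abi_ulongs,
 *     { sigset address, sigset size }
 * mirroring the kernel's own packed sigset argument for pselect6, which
 * is why the code reads both values from arg6 first and only then
 * fetches and converts the actual sigset.
 */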
1500 
1501 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1502     defined(TARGET_NR_ppoll_time64)
1503 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1504                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1505 {
1506     struct target_pollfd *target_pfd;
1507     unsigned int nfds = arg2;
1508     struct pollfd *pfd;
1509     unsigned int i;
1510     abi_long ret;
1511 
1512     pfd = NULL;
1513     target_pfd = NULL;
1514     if (nfds) {
1515         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1516             return -TARGET_EINVAL;
1517         }
1518         target_pfd = lock_user(VERIFY_WRITE, arg1,
1519                                sizeof(struct target_pollfd) * nfds, 1);
1520         if (!target_pfd) {
1521             return -TARGET_EFAULT;
1522         }
1523 
1524         pfd = alloca(sizeof(struct pollfd) * nfds);
1525         for (i = 0; i < nfds; i++) {
1526             pfd[i].fd = tswap32(target_pfd[i].fd);
1527             pfd[i].events = tswap16(target_pfd[i].events);
1528         }
1529     }
1530     if (ppoll) {
1531         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1532         target_sigset_t *target_set;
1533         sigset_t _set, *set = &_set;
1534 
1535         if (arg3) {
1536             if (time64) {
1537                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1538                     unlock_user(target_pfd, arg1, 0);
1539                     return -TARGET_EFAULT;
1540                 }
1541             } else {
1542                 if (target_to_host_timespec(timeout_ts, arg3)) {
1543                     unlock_user(target_pfd, arg1, 0);
1544                     return -TARGET_EFAULT;
1545                 }
1546             }
1547         } else {
1548             timeout_ts = NULL;
1549         }
1550 
1551         if (arg4) {
1552             if (arg5 != sizeof(target_sigset_t)) {
1553                 unlock_user(target_pfd, arg1, 0);
1554                 return -TARGET_EINVAL;
1555             }
1556 
1557             target_set = lock_user(VERIFY_READ, arg4,
1558                                    sizeof(target_sigset_t), 1);
1559             if (!target_set) {
1560                 unlock_user(target_pfd, arg1, 0);
1561                 return -TARGET_EFAULT;
1562             }
1563             target_to_host_sigset(set, target_set);
1564         } else {
1565             set = NULL;
1566         }
1567 
1568         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1569                                    set, SIGSET_T_SIZE));
1570 
1571         if (!is_error(ret) && arg3) {
1572             if (time64) {
1573                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1574                     return -TARGET_EFAULT;
1575                 }
1576             } else {
1577                 if (host_to_target_timespec(arg3, timeout_ts)) {
1578                     return -TARGET_EFAULT;
1579                 }
1580             }
1581         }
1582         if (arg4) {
1583             unlock_user(target_set, arg4, 0);
1584         }
1585     } else {
1586         struct timespec ts, *pts;
1587 
1588         if (arg3 >= 0) {
1589             /* Convert ms to secs, ns */
1590             ts.tv_sec = arg3 / 1000;
1591             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1592             pts = &ts;
1593         } else {
1594             /* -ve poll() timeout means "infinite" */
1595             pts = NULL;
1596         }
1597         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1598     }
1599 
1600     if (!is_error(ret)) {
1601         for (i = 0; i < nfds; i++) {
1602             target_pfd[i].revents = tswap16(pfd[i].revents);
1603         }
1604     }
1605     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1606     return ret;
1607 }
1608 #endif
1609 
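/*
 * do_pipe2() is a thin wrapper around the host pipe2() where available;
 * do_pipe() below additionally handles the targets whose original pipe
 * syscall returns the second descriptor in a register instead of storing
 * both descriptors through the user-supplied pointer.
 */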
1610 static abi_long do_pipe2(int host_pipe[], int flags)
1611 {
1612 #ifdef CONFIG_PIPE2
1613     return pipe2(host_pipe, flags);
1614 #else
1615     return -ENOSYS;
1616 #endif
1617 }
1618 
1619 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1620                         int flags, int is_pipe2)
1621 {
1622     int host_pipe[2];
1623     abi_long ret;
1624     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1625 
1626     if (is_error(ret))
1627         return get_errno(ret);
1628 
1629     /* Several targets have special calling conventions for the original
1630        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1631     if (!is_pipe2) {
1632 #if defined(TARGET_ALPHA)
1633         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1634         return host_pipe[0];
1635 #elif defined(TARGET_MIPS)
1636         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1637         return host_pipe[0];
1638 #elif defined(TARGET_SH4)
1639         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1640         return host_pipe[0];
1641 #elif defined(TARGET_SPARC)
1642         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1643         return host_pipe[0];
1644 #endif
1645     }
1646 
1647     if (put_user_s32(host_pipe[0], pipedes)
1648         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1649         return -TARGET_EFAULT;
1650     return get_errno(ret);
1651 }
1652 
1653 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1654                                               abi_ulong target_addr,
1655                                               socklen_t len)
1656 {
1657     struct target_ip_mreqn *target_smreqn;
1658 
1659     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_smreqn)
1661         return -TARGET_EFAULT;
1662     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1663     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1664     if (len == sizeof(struct target_ip_mreqn))
1665         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1666     unlock_user(target_smreqn, target_addr, 0);
1667 
1668     return 0;
1669 }
1670 
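/*
 * Copy a sockaddr from guest memory into host format, byte-swapping the
 * address family and the multi-byte family-specific fields (netlink
 * pid/groups, packet ifindex/hatype).  AF_UNIX addresses get the length
 * fixup described below; fds with a registered address translator
 * delegate to it instead.
 */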
1671 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1672                                                abi_ulong target_addr,
1673                                                socklen_t len)
1674 {
1675     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1676     sa_family_t sa_family;
1677     struct target_sockaddr *target_saddr;
1678 
1679     if (fd_trans_target_to_host_addr(fd)) {
1680         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1681     }
1682 
1683     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1684     if (!target_saddr)
1685         return -TARGET_EFAULT;
1686 
1687     sa_family = tswap16(target_saddr->sa_family);
1688 
1689     /* Oops. The caller might send an incomplete sun_path; sun_path
1690      * must be terminated by \0 (see the manual page), but
1691      * unfortunately it is quite common to specify the sockaddr_un
1692      * length as "strlen(x->sun_path)" when it should be
1693      * "strlen(...) + 1". We fix that up here if needed.
1694      * The Linux kernel applies a similar fixup.
1695      */
1696 
1697     if (sa_family == AF_UNIX) {
1698         if (len < unix_maxlen && len > 0) {
1699             char *cp = (char *)target_saddr;
1700 
1701             if (cp[len - 1] && !cp[len])
1702                 len++;
1703         }
1704         if (len > unix_maxlen)
1705             len = unix_maxlen;
1706     }
1707 
1708     memcpy(addr, target_saddr, len);
1709     addr->sa_family = sa_family;
1710     if (sa_family == AF_NETLINK) {
1711         struct sockaddr_nl *nladdr;
1712 
1713         nladdr = (struct sockaddr_nl *)addr;
1714         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1715         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1716     } else if (sa_family == AF_PACKET) {
1717         struct target_sockaddr_ll *lladdr;
1718 
1719         lladdr = (struct target_sockaddr_ll *)addr;
1720         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1721         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1722     }
1723     unlock_user(target_saddr, target_addr, 0);
1724 
1725     return 0;
1726 }
1727 
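/*
 * The reverse of target_to_host_sockaddr(): copy a host sockaddr back
 * into guest memory, byte-swapping sa_family and the multi-byte fields
 * of AF_NETLINK, AF_PACKET and AF_INET6 addresses.
 */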
1728 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1729                                                struct sockaddr *addr,
1730                                                socklen_t len)
1731 {
1732     struct target_sockaddr *target_saddr;
1733 
1734     if (len == 0) {
1735         return 0;
1736     }
1737     assert(addr);
1738 
1739     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1740     if (!target_saddr)
1741         return -TARGET_EFAULT;
1742     memcpy(target_saddr, addr, len);
1743     if (len >= offsetof(struct target_sockaddr, sa_family) +
1744         sizeof(target_saddr->sa_family)) {
1745         target_saddr->sa_family = tswap16(addr->sa_family);
1746     }
1747     if (addr->sa_family == AF_NETLINK &&
1748         len >= sizeof(struct target_sockaddr_nl)) {
1749         struct target_sockaddr_nl *target_nl =
1750                (struct target_sockaddr_nl *)target_saddr;
1751         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1752         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1753     } else if (addr->sa_family == AF_PACKET) {
1754         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1755         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1756         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1757     } else if (addr->sa_family == AF_INET6 &&
1758                len >= sizeof(struct target_sockaddr_in6)) {
1759         struct target_sockaddr_in6 *target_in6 =
1760                (struct target_sockaddr_in6 *)target_saddr;
1761         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1762     }
1763     unlock_user(target_saddr, target_addr, len);
1764 
1765     return 0;
1766 }
1767 
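/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * host format.  SCM_RIGHTS file descriptors and SCM_CREDENTIALS are
 * converted field by field; any other payload is copied verbatim and
 * logged as unsupported.
 */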
1768 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1769                                            struct target_msghdr *target_msgh)
1770 {
1771     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1772     abi_long msg_controllen;
1773     abi_ulong target_cmsg_addr;
1774     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1775     socklen_t space = 0;
1776 
1777     msg_controllen = tswapal(target_msgh->msg_controllen);
1778     if (msg_controllen < sizeof (struct target_cmsghdr))
1779         goto the_end;
1780     target_cmsg_addr = tswapal(target_msgh->msg_control);
1781     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1782     target_cmsg_start = target_cmsg;
1783     if (!target_cmsg)
1784         return -TARGET_EFAULT;
1785 
1786     while (cmsg && target_cmsg) {
1787         void *data = CMSG_DATA(cmsg);
1788         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1789 
1790         int len = tswapal(target_cmsg->cmsg_len)
1791             - sizeof(struct target_cmsghdr);
1792 
1793         space += CMSG_SPACE(len);
1794         if (space > msgh->msg_controllen) {
1795             space -= CMSG_SPACE(len);
1796             /* This is a QEMU bug, since we allocated the payload
1797              * area ourselves (unlike overflow in host-to-target
1798              * conversion, which is just the guest giving us a buffer
1799              * that's too small). It can't happen for the payload types
1800              * we currently support; if it becomes an issue in future
1801              * we would need to improve our allocation strategy to
1802              * something more intelligent than "twice the size of the
1803              * target buffer we're reading from".
1804              */
1805             qemu_log_mask(LOG_UNIMP,
1806                           ("Unsupported ancillary data %d/%d: "
1807                            "unhandled msg size\n"),
1808                           tswap32(target_cmsg->cmsg_level),
1809                           tswap32(target_cmsg->cmsg_type));
1810             break;
1811         }
1812 
1813         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1814             cmsg->cmsg_level = SOL_SOCKET;
1815         } else {
1816             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1817         }
1818         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1819         cmsg->cmsg_len = CMSG_LEN(len);
1820 
1821         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1822             int *fd = (int *)data;
1823             int *target_fd = (int *)target_data;
1824             int i, numfds = len / sizeof(int);
1825 
1826             for (i = 0; i < numfds; i++) {
1827                 __get_user(fd[i], target_fd + i);
1828             }
1829         } else if (cmsg->cmsg_level == SOL_SOCKET
1830                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1831             struct ucred *cred = (struct ucred *)data;
1832             struct target_ucred *target_cred =
1833                 (struct target_ucred *)target_data;
1834 
1835             __get_user(cred->pid, &target_cred->pid);
1836             __get_user(cred->uid, &target_cred->uid);
1837             __get_user(cred->gid, &target_cred->gid);
1838         } else {
1839             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1840                           cmsg->cmsg_level, cmsg->cmsg_type);
1841             memcpy(data, target_data, len);
1842         }
1843 
1844         cmsg = CMSG_NXTHDR(msgh, cmsg);
1845         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1846                                          target_cmsg_start);
1847     }
1848     unlock_user(target_cmsg, target_cmsg_addr, 0);
1849  the_end:
1850     msgh->msg_controllen = space;
1851     return 0;
1852 }
1853 
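/*
 * Convert host ancillary data back into the guest msghdr.  Payloads that
 * have a different size on the target (e.g. SO_TIMESTAMP's timeval) are
 * resized, and truncation is reported to the guest via MSG_CTRUNC, as
 * the comments inside the loop explain.
 */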
1854 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1855                                            struct msghdr *msgh)
1856 {
1857     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1858     abi_long msg_controllen;
1859     abi_ulong target_cmsg_addr;
1860     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1861     socklen_t space = 0;
1862 
1863     msg_controllen = tswapal(target_msgh->msg_controllen);
1864     if (msg_controllen < sizeof (struct target_cmsghdr))
1865         goto the_end;
1866     target_cmsg_addr = tswapal(target_msgh->msg_control);
1867     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1868     target_cmsg_start = target_cmsg;
1869     if (!target_cmsg)
1870         return -TARGET_EFAULT;
1871 
1872     while (cmsg && target_cmsg) {
1873         void *data = CMSG_DATA(cmsg);
1874         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1875 
1876         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1877         int tgt_len, tgt_space;
1878 
1879         /* We never copy a half-header but may copy half-data;
1880          * this is Linux's behaviour in put_cmsg(). Note that
1881          * truncation here is a guest problem (which we report
1882          * to the guest via the CTRUNC bit), unlike truncation
1883          * in target_to_host_cmsg, which is a QEMU bug.
1884          */
1885         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1886             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1887             break;
1888         }
1889 
1890         if (cmsg->cmsg_level == SOL_SOCKET) {
1891             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1892         } else {
1893             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1894         }
1895         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1896 
1897         /* Payload types which need a different size of payload on
1898          * the target must adjust tgt_len here.
1899          */
1900         tgt_len = len;
1901         switch (cmsg->cmsg_level) {
1902         case SOL_SOCKET:
1903             switch (cmsg->cmsg_type) {
1904             case SO_TIMESTAMP:
1905                 tgt_len = sizeof(struct target_timeval);
1906                 break;
1907             default:
1908                 break;
1909             }
1910             break;
1911         default:
1912             break;
1913         }
1914 
1915         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1916             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1917             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1918         }
1919 
1920         /* We must now copy-and-convert len bytes of payload
1921          * into tgt_len bytes of destination space. Bear in mind
1922          * that in both source and destination we may be dealing
1923          * with a truncated value!
1924          */
1925         switch (cmsg->cmsg_level) {
1926         case SOL_SOCKET:
1927             switch (cmsg->cmsg_type) {
1928             case SCM_RIGHTS:
1929             {
1930                 int *fd = (int *)data;
1931                 int *target_fd = (int *)target_data;
1932                 int i, numfds = tgt_len / sizeof(int);
1933 
1934                 for (i = 0; i < numfds; i++) {
1935                     __put_user(fd[i], target_fd + i);
1936                 }
1937                 break;
1938             }
1939             case SO_TIMESTAMP:
1940             {
1941                 struct timeval *tv = (struct timeval *)data;
1942                 struct target_timeval *target_tv =
1943                     (struct target_timeval *)target_data;
1944 
1945                 if (len != sizeof(struct timeval) ||
1946                     tgt_len != sizeof(struct target_timeval)) {
1947                     goto unimplemented;
1948                 }
1949 
1950                 /* copy struct timeval to target */
1951                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1952                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1953                 break;
1954             }
1955             case SCM_CREDENTIALS:
1956             {
1957                 struct ucred *cred = (struct ucred *)data;
1958                 struct target_ucred *target_cred =
1959                     (struct target_ucred *)target_data;
1960 
1961                 __put_user(cred->pid, &target_cred->pid);
1962                 __put_user(cred->uid, &target_cred->uid);
1963                 __put_user(cred->gid, &target_cred->gid);
1964                 break;
1965             }
1966             default:
1967                 goto unimplemented;
1968             }
1969             break;
1970 
1971         case SOL_IP:
1972             switch (cmsg->cmsg_type) {
1973             case IP_TTL:
1974             {
1975                 uint32_t *v = (uint32_t *)data;
1976                 uint32_t *t_int = (uint32_t *)target_data;
1977 
1978                 if (len != sizeof(uint32_t) ||
1979                     tgt_len != sizeof(uint32_t)) {
1980                     goto unimplemented;
1981                 }
1982                 __put_user(*v, t_int);
1983                 break;
1984             }
1985             case IP_RECVERR:
1986             {
1987                 struct errhdr_t {
1988                    struct sock_extended_err ee;
1989                    struct sockaddr_in offender;
1990                 };
1991                 struct errhdr_t *errh = (struct errhdr_t *)data;
1992                 struct errhdr_t *target_errh =
1993                     (struct errhdr_t *)target_data;
1994 
1995                 if (len != sizeof(struct errhdr_t) ||
1996                     tgt_len != sizeof(struct errhdr_t)) {
1997                     goto unimplemented;
1998                 }
1999                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2000                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2001                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2002                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2003                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2004                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2005                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2006                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2007                     (void *) &errh->offender, sizeof(errh->offender));
2008                 break;
2009             }
2010             default:
2011                 goto unimplemented;
2012             }
2013             break;
2014 
2015         case SOL_IPV6:
2016             switch (cmsg->cmsg_type) {
2017             case IPV6_HOPLIMIT:
2018             {
2019                 uint32_t *v = (uint32_t *)data;
2020                 uint32_t *t_int = (uint32_t *)target_data;
2021 
2022                 if (len != sizeof(uint32_t) ||
2023                     tgt_len != sizeof(uint32_t)) {
2024                     goto unimplemented;
2025                 }
2026                 __put_user(*v, t_int);
2027                 break;
2028             }
2029             case IPV6_RECVERR:
2030             {
2031                 struct errhdr6_t {
2032                    struct sock_extended_err ee;
2033                    struct sockaddr_in6 offender;
2034                 };
2035                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2036                 struct errhdr6_t *target_errh =
2037                     (struct errhdr6_t *)target_data;
2038 
2039                 if (len != sizeof(struct errhdr6_t) ||
2040                     tgt_len != sizeof(struct errhdr6_t)) {
2041                     goto unimplemented;
2042                 }
2043                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2044                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2045                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2046                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2047                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2048                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2049                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2050                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2051                     (void *) &errh->offender, sizeof(errh->offender));
2052                 break;
2053             }
2054             default:
2055                 goto unimplemented;
2056             }
2057             break;
2058 
2059         default:
2060         unimplemented:
2061             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2062                           cmsg->cmsg_level, cmsg->cmsg_type);
2063             memcpy(target_data, data, MIN(len, tgt_len));
2064             if (tgt_len > len) {
2065                 memset(target_data + len, 0, tgt_len - len);
2066             }
2067         }
2068 
2069         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2070         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2071         if (msg_controllen < tgt_space) {
2072             tgt_space = msg_controllen;
2073         }
2074         msg_controllen -= tgt_space;
2075         space += tgt_space;
2076         cmsg = CMSG_NXTHDR(msgh, cmsg);
2077         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2078                                          target_cmsg_start);
2079     }
2080     unlock_user(target_cmsg, target_cmsg_addr, space);
2081  the_end:
2082     target_msgh->msg_controllen = tswapal(space);
2083     return 0;
2084 }
2085 
2086 /* do_setsockopt() must return target values and target errnos. */
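/*
 * The switch below dispatches first on the socket level and then on the
 * option name.  Simple integer-valued options are read from guest memory
 * with get_user_u32() and passed straight through to the host
 * setsockopt(); structured options (multicast requests, BPF filters,
 * timeouts, ...) are converted field by field first.
 */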
2087 static abi_long do_setsockopt(int sockfd, int level, int optname,
2088                               abi_ulong optval_addr, socklen_t optlen)
2089 {
2090     abi_long ret;
2091     int val;
2092     struct ip_mreqn *ip_mreq;
2093     struct ip_mreq_source *ip_mreq_source;
2094 
2095     switch(level) {
2096     case SOL_TCP:
2097     case SOL_UDP:
2098         /* TCP and UDP options all take an 'int' value.  */
2099         if (optlen < sizeof(uint32_t))
2100             return -TARGET_EINVAL;
2101 
2102         if (get_user_u32(val, optval_addr))
2103             return -TARGET_EFAULT;
2104         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2105         break;
2106     case SOL_IP:
2107         switch(optname) {
2108         case IP_TOS:
2109         case IP_TTL:
2110         case IP_HDRINCL:
2111         case IP_ROUTER_ALERT:
2112         case IP_RECVOPTS:
2113         case IP_RETOPTS:
2114         case IP_PKTINFO:
2115         case IP_MTU_DISCOVER:
2116         case IP_RECVERR:
2117         case IP_RECVTTL:
2118         case IP_RECVTOS:
2119 #ifdef IP_FREEBIND
2120         case IP_FREEBIND:
2121 #endif
2122         case IP_MULTICAST_TTL:
2123         case IP_MULTICAST_LOOP:
2124             val = 0;
2125             if (optlen >= sizeof(uint32_t)) {
2126                 if (get_user_u32(val, optval_addr))
2127                     return -TARGET_EFAULT;
2128             } else if (optlen >= 1) {
2129                 if (get_user_u8(val, optval_addr))
2130                     return -TARGET_EFAULT;
2131             }
2132             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2133             break;
2134         case IP_ADD_MEMBERSHIP:
2135         case IP_DROP_MEMBERSHIP:
2136             if (optlen < sizeof (struct target_ip_mreq) ||
2137                 optlen > sizeof (struct target_ip_mreqn))
2138                 return -TARGET_EINVAL;
2139 
2140             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2141             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2142             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2143             break;
2144 
2145         case IP_BLOCK_SOURCE:
2146         case IP_UNBLOCK_SOURCE:
2147         case IP_ADD_SOURCE_MEMBERSHIP:
2148         case IP_DROP_SOURCE_MEMBERSHIP:
2149             if (optlen != sizeof (struct target_ip_mreq_source))
2150                 return -TARGET_EINVAL;
2151 
2152             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2153             if (!ip_mreq_source) {
2154                 return -TARGET_EFAULT;
2155             }
2156             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2157             unlock_user(ip_mreq_source, optval_addr, 0);
2158             break;
2159 
2160         default:
2161             goto unimplemented;
2162         }
2163         break;
2164     case SOL_IPV6:
2165         switch (optname) {
2166         case IPV6_MTU_DISCOVER:
2167         case IPV6_MTU:
2168         case IPV6_V6ONLY:
2169         case IPV6_RECVPKTINFO:
2170         case IPV6_UNICAST_HOPS:
2171         case IPV6_MULTICAST_HOPS:
2172         case IPV6_MULTICAST_LOOP:
2173         case IPV6_RECVERR:
2174         case IPV6_RECVHOPLIMIT:
2175         case IPV6_2292HOPLIMIT:
2176         case IPV6_CHECKSUM:
2177         case IPV6_ADDRFORM:
2178         case IPV6_2292PKTINFO:
2179         case IPV6_RECVTCLASS:
2180         case IPV6_RECVRTHDR:
2181         case IPV6_2292RTHDR:
2182         case IPV6_RECVHOPOPTS:
2183         case IPV6_2292HOPOPTS:
2184         case IPV6_RECVDSTOPTS:
2185         case IPV6_2292DSTOPTS:
2186         case IPV6_TCLASS:
2187         case IPV6_ADDR_PREFERENCES:
2188 #ifdef IPV6_RECVPATHMTU
2189         case IPV6_RECVPATHMTU:
2190 #endif
2191 #ifdef IPV6_TRANSPARENT
2192         case IPV6_TRANSPARENT:
2193 #endif
2194 #ifdef IPV6_FREEBIND
2195         case IPV6_FREEBIND:
2196 #endif
2197 #ifdef IPV6_RECVORIGDSTADDR
2198         case IPV6_RECVORIGDSTADDR:
2199 #endif
2200             val = 0;
2201             if (optlen < sizeof(uint32_t)) {
2202                 return -TARGET_EINVAL;
2203             }
2204             if (get_user_u32(val, optval_addr)) {
2205                 return -TARGET_EFAULT;
2206             }
2207             ret = get_errno(setsockopt(sockfd, level, optname,
2208                                        &val, sizeof(val)));
2209             break;
2210         case IPV6_PKTINFO:
2211         {
2212             struct in6_pktinfo pki;
2213 
2214             if (optlen < sizeof(pki)) {
2215                 return -TARGET_EINVAL;
2216             }
2217 
2218             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2219                 return -TARGET_EFAULT;
2220             }
2221 
2222             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2223 
2224             ret = get_errno(setsockopt(sockfd, level, optname,
2225                                        &pki, sizeof(pki)));
2226             break;
2227         }
2228         case IPV6_ADD_MEMBERSHIP:
2229         case IPV6_DROP_MEMBERSHIP:
2230         {
2231             struct ipv6_mreq ipv6mreq;
2232 
2233             if (optlen < sizeof(ipv6mreq)) {
2234                 return -TARGET_EINVAL;
2235             }
2236 
2237             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2238                 return -TARGET_EFAULT;
2239             }
2240 
2241             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2242 
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &ipv6mreq, sizeof(ipv6mreq)));
2245             break;
2246         }
2247         default:
2248             goto unimplemented;
2249         }
2250         break;
2251     case SOL_ICMPV6:
2252         switch (optname) {
2253         case ICMPV6_FILTER:
2254         {
2255             struct icmp6_filter icmp6f;
2256 
2257             if (optlen > sizeof(icmp6f)) {
2258                 optlen = sizeof(icmp6f);
2259             }
2260 
2261             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2262                 return -TARGET_EFAULT;
2263             }
2264 
2265             for (val = 0; val < 8; val++) {
2266                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2267             }
2268 
2269             ret = get_errno(setsockopt(sockfd, level, optname,
2270                                        &icmp6f, optlen));
2271             break;
2272         }
2273         default:
2274             goto unimplemented;
2275         }
2276         break;
2277     case SOL_RAW:
2278         switch (optname) {
2279         case ICMP_FILTER:
2280         case IPV6_CHECKSUM:
2281             /* These options take a u32 value. */
2282             if (optlen < sizeof(uint32_t)) {
2283                 return -TARGET_EINVAL;
2284             }
2285 
2286             if (get_user_u32(val, optval_addr)) {
2287                 return -TARGET_EFAULT;
2288             }
2289             ret = get_errno(setsockopt(sockfd, level, optname,
2290                                        &val, sizeof(val)));
2291             break;
2292 
2293         default:
2294             goto unimplemented;
2295         }
2296         break;
2297 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2298     case SOL_ALG:
2299         switch (optname) {
2300         case ALG_SET_KEY:
2301         {
2302             char *alg_key = g_malloc(optlen);
2303 
2304             if (!alg_key) {
2305                 return -TARGET_ENOMEM;
2306             }
2307             if (copy_from_user(alg_key, optval_addr, optlen)) {
2308                 g_free(alg_key);
2309                 return -TARGET_EFAULT;
2310             }
2311             ret = get_errno(setsockopt(sockfd, level, optname,
2312                                        alg_key, optlen));
2313             g_free(alg_key);
2314             break;
2315         }
2316         case ALG_SET_AEAD_AUTHSIZE:
2317         {
2318             ret = get_errno(setsockopt(sockfd, level, optname,
2319                                        NULL, optlen));
2320             break;
2321         }
2322         default:
2323             goto unimplemented;
2324         }
2325         break;
2326 #endif
2327     case TARGET_SOL_SOCKET:
2328         switch (optname) {
2329         case TARGET_SO_RCVTIMEO:
2330         {
2331                 struct timeval tv;
2332 
2333                 optname = SO_RCVTIMEO;
2334 
2335 set_timeout:
2336                 if (optlen != sizeof(struct target_timeval)) {
2337                     return -TARGET_EINVAL;
2338                 }
2339 
2340                 if (copy_from_user_timeval(&tv, optval_addr)) {
2341                     return -TARGET_EFAULT;
2342                 }
2343 
2344                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2345                                 &tv, sizeof(tv)));
2346                 return ret;
2347         }
2348         case TARGET_SO_SNDTIMEO:
2349                 optname = SO_SNDTIMEO;
2350                 goto set_timeout;
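        /*
         * SO_ATTACH_FILTER: the classic-BPF program is copied from guest
         * memory, with the multi-byte fields of each sock_filter
         * instruction byte-swapped, before being attached on the host.
         */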
2351         case TARGET_SO_ATTACH_FILTER:
2352         {
2353                 struct target_sock_fprog *tfprog;
2354                 struct target_sock_filter *tfilter;
2355                 struct sock_fprog fprog;
2356                 struct sock_filter *filter;
2357                 int i;
2358 
2359                 if (optlen != sizeof(*tfprog)) {
2360                     return -TARGET_EINVAL;
2361                 }
2362                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2363                     return -TARGET_EFAULT;
2364                 }
2365                 if (!lock_user_struct(VERIFY_READ, tfilter,
2366                                       tswapal(tfprog->filter), 0)) {
2367                     unlock_user_struct(tfprog, optval_addr, 1);
2368                     return -TARGET_EFAULT;
2369                 }
2370 
2371                 fprog.len = tswap16(tfprog->len);
2372                 filter = g_try_new(struct sock_filter, fprog.len);
2373                 if (filter == NULL) {
2374                     unlock_user_struct(tfilter, tfprog->filter, 1);
2375                     unlock_user_struct(tfprog, optval_addr, 1);
2376                     return -TARGET_ENOMEM;
2377                 }
2378                 for (i = 0; i < fprog.len; i++) {
2379                     filter[i].code = tswap16(tfilter[i].code);
2380                     filter[i].jt = tfilter[i].jt;
2381                     filter[i].jf = tfilter[i].jf;
2382                     filter[i].k = tswap32(tfilter[i].k);
2383                 }
2384                 fprog.filter = filter;
2385 
2386                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2387                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2388                 g_free(filter);
2389 
2390                 unlock_user_struct(tfilter, tfprog->filter, 1);
2391                 unlock_user_struct(tfprog, optval_addr, 1);
2392                 return ret;
2393         }
2394         case TARGET_SO_BINDTODEVICE:
2395         {
2396                 char *dev_ifname, *addr_ifname;
2397 
2398                 if (optlen > IFNAMSIZ - 1) {
2399                     optlen = IFNAMSIZ - 1;
2400                 }
2401                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2402                 if (!dev_ifname) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 optname = SO_BINDTODEVICE;
2406                 addr_ifname = alloca(IFNAMSIZ);
2407                 memcpy(addr_ifname, dev_ifname, optlen);
2408                 addr_ifname[optlen] = 0;
2409                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2410                                            addr_ifname, optlen));
2411                 unlock_user(dev_ifname, optval_addr, 0);
2412                 return ret;
2413         }
2414         case TARGET_SO_LINGER:
2415         {
2416                 struct linger lg;
2417                 struct target_linger *tlg;
2418 
2419                 if (optlen != sizeof(struct target_linger)) {
2420                     return -TARGET_EINVAL;
2421                 }
2422                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2423                     return -TARGET_EFAULT;
2424                 }
2425                 __get_user(lg.l_onoff, &tlg->l_onoff);
2426                 __get_user(lg.l_linger, &tlg->l_linger);
2427                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2428                                 &lg, sizeof(lg)));
2429                 unlock_user_struct(tlg, optval_addr, 0);
2430                 return ret;
2431         }
2432             /* Options with 'int' argument.  */
2433         case TARGET_SO_DEBUG:
2434                 optname = SO_DEBUG;
2435                 break;
2436         case TARGET_SO_REUSEADDR:
2437                 optname = SO_REUSEADDR;
2438                 break;
2439 #ifdef SO_REUSEPORT
2440         case TARGET_SO_REUSEPORT:
2441                 optname = SO_REUSEPORT;
2442                 break;
2443 #endif
2444         case TARGET_SO_TYPE:
2445                 optname = SO_TYPE;
2446                 break;
2447         case TARGET_SO_ERROR:
2448                 optname = SO_ERROR;
2449                 break;
2450         case TARGET_SO_DONTROUTE:
2451                 optname = SO_DONTROUTE;
2452                 break;
2453         case TARGET_SO_BROADCAST:
2454                 optname = SO_BROADCAST;
2455                 break;
2456         case TARGET_SO_SNDBUF:
2457                 optname = SO_SNDBUF;
2458                 break;
2459         case TARGET_SO_SNDBUFFORCE:
2460                 optname = SO_SNDBUFFORCE;
2461                 break;
2462         case TARGET_SO_RCVBUF:
2463                 optname = SO_RCVBUF;
2464                 break;
2465         case TARGET_SO_RCVBUFFORCE:
2466                 optname = SO_RCVBUFFORCE;
2467                 break;
2468         case TARGET_SO_KEEPALIVE:
2469                 optname = SO_KEEPALIVE;
2470                 break;
2471         case TARGET_SO_OOBINLINE:
2472                 optname = SO_OOBINLINE;
2473                 break;
2474         case TARGET_SO_NO_CHECK:
2475                 optname = SO_NO_CHECK;
2476                 break;
2477         case TARGET_SO_PRIORITY:
2478                 optname = SO_PRIORITY;
2479                 break;
2480 #ifdef SO_BSDCOMPAT
2481         case TARGET_SO_BSDCOMPAT:
2482                 optname = SO_BSDCOMPAT;
2483                 break;
2484 #endif
2485         case TARGET_SO_PASSCRED:
2486                 optname = SO_PASSCRED;
2487                 break;
2488         case TARGET_SO_PASSSEC:
2489                 optname = SO_PASSSEC;
2490                 break;
2491         case TARGET_SO_TIMESTAMP:
2492                 optname = SO_TIMESTAMP;
2493                 break;
2494         case TARGET_SO_RCVLOWAT:
2495                 optname = SO_RCVLOWAT;
2496                 break;
2497         default:
2498             goto unimplemented;
2499         }
2500         if (optlen < sizeof(uint32_t))
2501             return -TARGET_EINVAL;
2502 
2503         if (get_user_u32(val, optval_addr))
2504             return -TARGET_EFAULT;
2505         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2506         break;
2507 #ifdef SOL_NETLINK
2508     case SOL_NETLINK:
2509         switch (optname) {
2510         case NETLINK_PKTINFO:
2511         case NETLINK_ADD_MEMBERSHIP:
2512         case NETLINK_DROP_MEMBERSHIP:
2513         case NETLINK_BROADCAST_ERROR:
2514         case NETLINK_NO_ENOBUFS:
2515 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2516         case NETLINK_LISTEN_ALL_NSID:
2517         case NETLINK_CAP_ACK:
2518 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2520         case NETLINK_EXT_ACK:
2521 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2522 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2523         case NETLINK_GET_STRICT_CHK:
2524 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2525             break;
2526         default:
2527             goto unimplemented;
2528         }
2529         val = 0;
2530         if (optlen < sizeof(uint32_t)) {
2531             return -TARGET_EINVAL;
2532         }
2533         if (get_user_u32(val, optval_addr)) {
2534             return -TARGET_EFAULT;
2535         }
2536         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2537                                    sizeof(val)));
2538         break;
2539 #endif /* SOL_NETLINK */
2540     default:
2541     unimplemented:
2542         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2543                       level, optname);
2544         ret = -TARGET_ENOPROTOOPT;
2545     }
2546     return ret;
2547 }
2548 
2549 /* do_getsockopt() must return target values and target errnos. */
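/*
 * Mirrors do_setsockopt(): the integer-valued options all share the
 * int_case path below, which writes the result back as either a u32 or
 * a single byte depending on the buffer length the guest supplied.
 */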
2550 static abi_long do_getsockopt(int sockfd, int level, int optname,
2551                               abi_ulong optval_addr, abi_ulong optlen)
2552 {
2553     abi_long ret;
2554     int len, val;
2555     socklen_t lv;
2556 
2557     switch(level) {
2558     case TARGET_SOL_SOCKET:
2559         level = SOL_SOCKET;
2560         switch (optname) {
2561         /* These don't just return a single integer */
2562         case TARGET_SO_PEERNAME:
2563             goto unimplemented;
2564         case TARGET_SO_RCVTIMEO: {
2565             struct timeval tv;
2566             socklen_t tvlen;
2567 
2568             optname = SO_RCVTIMEO;
2569 
2570 get_timeout:
2571             if (get_user_u32(len, optlen)) {
2572                 return -TARGET_EFAULT;
2573             }
2574             if (len < 0) {
2575                 return -TARGET_EINVAL;
2576             }
2577 
2578             tvlen = sizeof(tv);
2579             ret = get_errno(getsockopt(sockfd, level, optname,
2580                                        &tv, &tvlen));
2581             if (ret < 0) {
2582                 return ret;
2583             }
2584             if (len > sizeof(struct target_timeval)) {
2585                 len = sizeof(struct target_timeval);
2586             }
2587             if (copy_to_user_timeval(optval_addr, &tv)) {
2588                 return -TARGET_EFAULT;
2589             }
2590             if (put_user_u32(len, optlen)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             break;
2594         }
2595         case TARGET_SO_SNDTIMEO:
2596             optname = SO_SNDTIMEO;
2597             goto get_timeout;
2598         case TARGET_SO_PEERCRED: {
2599             struct ucred cr;
2600             socklen_t crlen;
2601             struct target_ucred *tcr;
2602 
2603             if (get_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             if (len < 0) {
2607                 return -TARGET_EINVAL;
2608             }
2609 
2610             crlen = sizeof(cr);
2611             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2612                                        &cr, &crlen));
2613             if (ret < 0) {
2614                 return ret;
2615             }
2616             if (len > crlen) {
2617                 len = crlen;
2618             }
2619             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             __put_user(cr.pid, &tcr->pid);
2623             __put_user(cr.uid, &tcr->uid);
2624             __put_user(cr.gid, &tcr->gid);
2625             unlock_user_struct(tcr, optval_addr, 1);
2626             if (put_user_u32(len, optlen)) {
2627                 return -TARGET_EFAULT;
2628             }
2629             break;
2630         }
2631         case TARGET_SO_PEERSEC: {
2632             char *name;
2633 
2634             if (get_user_u32(len, optlen)) {
2635                 return -TARGET_EFAULT;
2636             }
2637             if (len < 0) {
2638                 return -TARGET_EINVAL;
2639             }
2640             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2641             if (!name) {
2642                 return -TARGET_EFAULT;
2643             }
2644             lv = len;
2645             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2646                                        name, &lv));
2647             if (put_user_u32(lv, optlen)) {
2648                 ret = -TARGET_EFAULT;
2649             }
2650             unlock_user(name, optval_addr, lv);
2651             break;
2652         }
2653         case TARGET_SO_LINGER:
2654         {
2655             struct linger lg;
2656             socklen_t lglen;
2657             struct target_linger *tlg;
2658 
2659             if (get_user_u32(len, optlen)) {
2660                 return -TARGET_EFAULT;
2661             }
2662             if (len < 0) {
2663                 return -TARGET_EINVAL;
2664             }
2665 
2666             lglen = sizeof(lg);
2667             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2668                                        &lg, &lglen));
2669             if (ret < 0) {
2670                 return ret;
2671             }
2672             if (len > lglen) {
2673                 len = lglen;
2674             }
2675             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2676                 return -TARGET_EFAULT;
2677             }
2678             __put_user(lg.l_onoff, &tlg->l_onoff);
2679             __put_user(lg.l_linger, &tlg->l_linger);
2680             unlock_user_struct(tlg, optval_addr, 1);
2681             if (put_user_u32(len, optlen)) {
2682                 return -TARGET_EFAULT;
2683             }
2684             break;
2685         }
2686         /* Options with 'int' argument.  */
2687         case TARGET_SO_DEBUG:
2688             optname = SO_DEBUG;
2689             goto int_case;
2690         case TARGET_SO_REUSEADDR:
2691             optname = SO_REUSEADDR;
2692             goto int_case;
2693 #ifdef SO_REUSEPORT
2694         case TARGET_SO_REUSEPORT:
2695             optname = SO_REUSEPORT;
2696             goto int_case;
2697 #endif
2698         case TARGET_SO_TYPE:
2699             optname = SO_TYPE;
2700             goto int_case;
2701         case TARGET_SO_ERROR:
2702             optname = SO_ERROR;
2703             goto int_case;
2704         case TARGET_SO_DONTROUTE:
2705             optname = SO_DONTROUTE;
2706             goto int_case;
2707         case TARGET_SO_BROADCAST:
2708             optname = SO_BROADCAST;
2709             goto int_case;
2710         case TARGET_SO_SNDBUF:
2711             optname = SO_SNDBUF;
2712             goto int_case;
2713         case TARGET_SO_RCVBUF:
2714             optname = SO_RCVBUF;
2715             goto int_case;
2716         case TARGET_SO_KEEPALIVE:
2717             optname = SO_KEEPALIVE;
2718             goto int_case;
2719         case TARGET_SO_OOBINLINE:
2720             optname = SO_OOBINLINE;
2721             goto int_case;
2722         case TARGET_SO_NO_CHECK:
2723             optname = SO_NO_CHECK;
2724             goto int_case;
2725         case TARGET_SO_PRIORITY:
2726             optname = SO_PRIORITY;
2727             goto int_case;
2728 #ifdef SO_BSDCOMPAT
2729         case TARGET_SO_BSDCOMPAT:
2730             optname = SO_BSDCOMPAT;
2731             goto int_case;
2732 #endif
2733         case TARGET_SO_PASSCRED:
2734             optname = SO_PASSCRED;
2735             goto int_case;
2736         case TARGET_SO_TIMESTAMP:
2737             optname = SO_TIMESTAMP;
2738             goto int_case;
2739         case TARGET_SO_RCVLOWAT:
2740             optname = SO_RCVLOWAT;
2741             goto int_case;
2742         case TARGET_SO_ACCEPTCONN:
2743             optname = SO_ACCEPTCONN;
2744             goto int_case;
2745         case TARGET_SO_PROTOCOL:
2746             optname = SO_PROTOCOL;
2747             goto int_case;
2748         case TARGET_SO_DOMAIN:
2749             optname = SO_DOMAIN;
2750             goto int_case;
2751         default:
2752             goto int_case;
2753         }
2754         break;
2755     case SOL_TCP:
2756     case SOL_UDP:
2757         /* TCP and UDP options all take an 'int' value.  */
2758     int_case:
2759         if (get_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         if (len < 0)
2762             return -TARGET_EINVAL;
2763         lv = sizeof(lv);
2764         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2765         if (ret < 0)
2766             return ret;
2767         if (optname == SO_TYPE) {
2768             val = host_to_target_sock_type(val);
2769         }
2770         if (len > lv)
2771             len = lv;
2772         if (len == 4) {
2773             if (put_user_u32(val, optval_addr))
2774                 return -TARGET_EFAULT;
2775         } else {
2776             if (put_user_u8(val, optval_addr))
2777                 return -TARGET_EFAULT;
2778         }
2779         if (put_user_u32(len, optlen))
2780             return -TARGET_EFAULT;
2781         break;
2782     case SOL_IP:
2783         switch(optname) {
2784         case IP_TOS:
2785         case IP_TTL:
2786         case IP_HDRINCL:
2787         case IP_ROUTER_ALERT:
2788         case IP_RECVOPTS:
2789         case IP_RETOPTS:
2790         case IP_PKTINFO:
2791         case IP_MTU_DISCOVER:
2792         case IP_RECVERR:
2793         case IP_RECVTOS:
2794 #ifdef IP_FREEBIND
2795         case IP_FREEBIND:
2796 #endif
2797         case IP_MULTICAST_TTL:
2798         case IP_MULTICAST_LOOP:
2799             if (get_user_u32(len, optlen))
2800                 return -TARGET_EFAULT;
2801             if (len < 0)
2802                 return -TARGET_EINVAL;
2803             lv = sizeof(lv);
2804             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2805             if (ret < 0)
2806                 return ret;
2807             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2808                 len = 1;
2809                 if (put_user_u32(len, optlen)
2810                     || put_user_u8(val, optval_addr))
2811                     return -TARGET_EFAULT;
2812             } else {
2813                 if (len > sizeof(int))
2814                     len = sizeof(int);
2815                 if (put_user_u32(len, optlen)
2816                     || put_user_u32(val, optval_addr))
2817                     return -TARGET_EFAULT;
2818             }
2819             break;
2820         default:
2821             ret = -TARGET_ENOPROTOOPT;
2822             break;
2823         }
2824         break;
2825     case SOL_IPV6:
2826         switch (optname) {
2827         case IPV6_MTU_DISCOVER:
2828         case IPV6_MTU:
2829         case IPV6_V6ONLY:
2830         case IPV6_RECVPKTINFO:
2831         case IPV6_UNICAST_HOPS:
2832         case IPV6_MULTICAST_HOPS:
2833         case IPV6_MULTICAST_LOOP:
2834         case IPV6_RECVERR:
2835         case IPV6_RECVHOPLIMIT:
2836         case IPV6_2292HOPLIMIT:
2837         case IPV6_CHECKSUM:
2838         case IPV6_ADDRFORM:
2839         case IPV6_2292PKTINFO:
2840         case IPV6_RECVTCLASS:
2841         case IPV6_RECVRTHDR:
2842         case IPV6_2292RTHDR:
2843         case IPV6_RECVHOPOPTS:
2844         case IPV6_2292HOPOPTS:
2845         case IPV6_RECVDSTOPTS:
2846         case IPV6_2292DSTOPTS:
2847         case IPV6_TCLASS:
2848         case IPV6_ADDR_PREFERENCES:
2849 #ifdef IPV6_RECVPATHMTU
2850         case IPV6_RECVPATHMTU:
2851 #endif
2852 #ifdef IPV6_TRANSPARENT
2853         case IPV6_TRANSPARENT:
2854 #endif
2855 #ifdef IPV6_FREEBIND
2856         case IPV6_FREEBIND:
2857 #endif
2858 #ifdef IPV6_RECVORIGDSTADDR
2859         case IPV6_RECVORIGDSTADDR:
2860 #endif
2861             if (get_user_u32(len, optlen))
2862                 return -TARGET_EFAULT;
2863             if (len < 0)
2864                 return -TARGET_EINVAL;
2865             lv = sizeof(lv);
2866             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2867             if (ret < 0)
2868                 return ret;
2869             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2870                 len = 1;
2871                 if (put_user_u32(len, optlen)
2872                     || put_user_u8(val, optval_addr))
2873                     return -TARGET_EFAULT;
2874             } else {
2875                 if (len > sizeof(int))
2876                     len = sizeof(int);
2877                 if (put_user_u32(len, optlen)
2878                     || put_user_u32(val, optval_addr))
2879                     return -TARGET_EFAULT;
2880             }
2881             break;
2882         default:
2883             ret = -TARGET_ENOPROTOOPT;
2884             break;
2885         }
2886         break;
2887 #ifdef SOL_NETLINK
2888     case SOL_NETLINK:
2889         switch (optname) {
2890         case NETLINK_PKTINFO:
2891         case NETLINK_BROADCAST_ERROR:
2892         case NETLINK_NO_ENOBUFS:
2893 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2894         case NETLINK_LISTEN_ALL_NSID:
2895         case NETLINK_CAP_ACK:
2896 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2897 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2898         case NETLINK_EXT_ACK:
2899 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2901         case NETLINK_GET_STRICT_CHK:
2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2903             if (get_user_u32(len, optlen)) {
2904                 return -TARGET_EFAULT;
2905             }
2906             if (len != sizeof(val)) {
2907                 return -TARGET_EINVAL;
2908             }
2909             lv = len;
2910             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2911             if (ret < 0) {
2912                 return ret;
2913             }
2914             if (put_user_u32(lv, optlen)
2915                 || put_user_u32(val, optval_addr)) {
2916                 return -TARGET_EFAULT;
2917             }
2918             break;
2919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2920         case NETLINK_LIST_MEMBERSHIPS:
2921         {
2922             uint32_t *results;
2923             int i;
2924             if (get_user_u32(len, optlen)) {
2925                 return -TARGET_EFAULT;
2926             }
2927             if (len < 0) {
2928                 return -TARGET_EINVAL;
2929             }
2930             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2931             if (!results && len > 0) {
2932                 return -TARGET_EFAULT;
2933             }
2934             lv = len;
2935             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2936             if (ret < 0) {
2937                 unlock_user(results, optval_addr, 0);
2938                 return ret;
2939             }
2940             /* Swap host endianness to target endianness. */
2941             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2942                 results[i] = tswap32(results[i]);
2943             }
2944             if (put_user_u32(lv, optlen)) {
2945                 return -TARGET_EFAULT;
2946             }
2947             unlock_user(results, optval_addr, 0);
2948             break;
2949         }
2950 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2951         default:
2952             goto unimplemented;
2953         }
2954         break;
2955 #endif /* SOL_NETLINK */
2956     default:
2957     unimplemented:
2958         qemu_log_mask(LOG_UNIMP,
2959                       "getsockopt level=%d optname=%d not yet supported\n",
2960                       level, optname);
2961         ret = -TARGET_EOPNOTSUPP;
2962         break;
2963     }
2964     return ret;
2965 }
2966 
2967 /* Convert a target low/high pair representing a file offset into the host
2968  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2969  * as the kernel doesn't handle them either.
2970  */
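/* The offset is assembled in two TARGET_LONG_BITS / 2 shifts (and split
 * back in two HOST_LONG_BITS / 2 shifts) so that no single shift ever
 * equals the width of the type, which would be undefined behaviour in C.
 * For example, with a 32-bit guest and a 64-bit host, tlow = 0x89abcdef
 * and thigh = 0x01234567 combine into off = 0x0123456789abcdef, which
 * lands entirely in *hlow while *hhigh becomes 0.
 */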
2971 static void target_to_host_low_high(abi_ulong tlow,
2972                                     abi_ulong thigh,
2973                                     unsigned long *hlow,
2974                                     unsigned long *hhigh)
2975 {
2976     uint64_t off = tlow |
2977         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2978         TARGET_LONG_BITS / 2;
2979 
2980     *hlow = off;
2981     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2982 }
2983 
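/*
 * Lock a guest iovec array for I/O.  The entry count is bounded by
 * IOV_MAX, each buffer is locked individually, and a bad address after
 * the first entry degrades to a zero-length entry so that the host call
 * performs a partial transfer instead of failing outright.
 */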
2984 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2985                                 abi_ulong count, int copy)
2986 {
2987     struct target_iovec *target_vec;
2988     struct iovec *vec;
2989     abi_ulong total_len, max_len;
2990     int i;
2991     int err = 0;
2992     bool bad_address = false;
2993 
2994     if (count == 0) {
2995         errno = 0;
2996         return NULL;
2997     }
2998     if (count > IOV_MAX) {
2999         errno = EINVAL;
3000         return NULL;
3001     }
3002 
3003     vec = g_try_new0(struct iovec, count);
3004     if (vec == NULL) {
3005         errno = ENOMEM;
3006         return NULL;
3007     }
3008 
3009     target_vec = lock_user(VERIFY_READ, target_addr,
3010                            count * sizeof(struct target_iovec), 1);
3011     if (target_vec == NULL) {
3012         err = EFAULT;
3013         goto fail2;
3014     }
3015 
3016     /* ??? If host page size > target page size, this will result in a
3017        value larger than what we can actually support.  */
3018     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3019     total_len = 0;
3020 
3021     for (i = 0; i < count; i++) {
3022         abi_ulong base = tswapal(target_vec[i].iov_base);
3023         abi_long len = tswapal(target_vec[i].iov_len);
3024 
3025         if (len < 0) {
3026             err = EINVAL;
3027             goto fail;
3028         } else if (len == 0) {
3029             /* A zero-length entry is ignored.  */
3030             vec[i].iov_base = 0;
3031         } else {
3032             vec[i].iov_base = lock_user(type, base, len, copy);
3033             /* If the first buffer pointer is bad, this is a fault.  But
3034              * subsequent bad buffers will result in a partial write; this
3035              * is realized by filling the vector with null pointers and
3036              * zero lengths. */
3037             if (!vec[i].iov_base) {
3038                 if (i == 0) {
3039                     err = EFAULT;
3040                     goto fail;
3041                 } else {
3042                     bad_address = true;
3043                 }
3044             }
3045             if (bad_address) {
3046                 len = 0;
3047             }
3048             if (len > max_len - total_len) {
3049                 len = max_len - total_len;
3050             }
3051         }
3052         vec[i].iov_len = len;
3053         total_len += len;
3054     }
3055 
3056     unlock_user(target_vec, target_addr, 0);
3057     return vec;
3058 
3059  fail:
3060     while (--i >= 0) {
3061         if (tswapal(target_vec[i].iov_len) > 0) {
3062             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3063         }
3064     }
3065     unlock_user(target_vec, target_addr, 0);
3066  fail2:
3067     g_free(vec);
3068     errno = err;
3069     return NULL;
3070 }
3071 
3072 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3073                          abi_ulong count, int copy)
3074 {
3075     struct target_iovec *target_vec;
3076     int i;
3077 
3078     target_vec = lock_user(VERIFY_READ, target_addr,
3079                            count * sizeof(struct target_iovec), 1);
3080     if (target_vec) {
3081         for (i = 0; i < count; i++) {
3082             abi_ulong base = tswapal(target_vec[i].iov_base);
3083             abi_long len = tswapal(target_vec[i].iov_len);
3084             if (len < 0) {
3085                 break;
3086             }
3087             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3088         }
3089         unlock_user(target_vec, target_addr, 0);
3090     }
3091 
3092     g_free(vec);
3093 }
3094 
3095 static inline int target_to_host_sock_type(int *type)
3096 {
3097     int host_type = 0;
3098     int target_type = *type;
3099 
3100     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3101     case TARGET_SOCK_DGRAM:
3102         host_type = SOCK_DGRAM;
3103         break;
3104     case TARGET_SOCK_STREAM:
3105         host_type = SOCK_STREAM;
3106         break;
3107     default:
3108         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3109         break;
3110     }
3111     if (target_type & TARGET_SOCK_CLOEXEC) {
3112 #if defined(SOCK_CLOEXEC)
3113         host_type |= SOCK_CLOEXEC;
3114 #else
3115         return -TARGET_EINVAL;
3116 #endif
3117     }
3118     if (target_type & TARGET_SOCK_NONBLOCK) {
3119 #if defined(SOCK_NONBLOCK)
3120         host_type |= SOCK_NONBLOCK;
3121 #elif !defined(O_NONBLOCK)
3122         return -TARGET_EINVAL;
3123 #endif
3124     }
3125     *type = host_type;
3126     return 0;
3127 }
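/*
 * Note: when the host lacks SOCK_NONBLOCK but does have O_NONBLOCK,
 * target_to_host_sock_type() above deliberately leaves the non-blocking
 * flag untranslated; sock_flags_fixup() below then emulates it after
 * socket creation with fcntl(F_SETFL, O_NONBLOCK).  TARGET_SOCK_CLOEXEC,
 * by contrast, has no fallback and fails with -TARGET_EINVAL when the
 * host does not provide SOCK_CLOEXEC.
 */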
3128 
3129 /* Try to emulate socket type flags after socket creation.  */
3130 static int sock_flags_fixup(int fd, int target_type)
3131 {
3132 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3133     if (target_type & TARGET_SOCK_NONBLOCK) {
3134         int flags = fcntl(fd, F_GETFL);
3135         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3136             close(fd);
3137             return -TARGET_EINVAL;
3138         }
3139     }
3140 #endif
3141     return fd;
3142 }
3143 
3144 /* do_socket() Must return target values and target errnos. */
3145 static abi_long do_socket(int domain, int type, int protocol)
3146 {
3147     int target_type = type;
3148     int ret;
3149 
3150     ret = target_to_host_sock_type(&type);
3151     if (ret) {
3152         return ret;
3153     }
3154 
3155     if (domain == PF_NETLINK && !(
3156 #ifdef CONFIG_RTNETLINK
3157          protocol == NETLINK_ROUTE ||
3158 #endif
3159          protocol == NETLINK_KOBJECT_UEVENT ||
3160          protocol == NETLINK_AUDIT)) {
3161         return -TARGET_EPROTONOSUPPORT;
3162     }
3163 
3164     if (domain == AF_PACKET ||
3165         (domain == AF_INET && type == SOCK_PACKET)) {
3166         protocol = tswap16(protocol);
3167     }
3168 
3169     ret = get_errno(socket(domain, type, protocol));
3170     if (ret >= 0) {
3171         ret = sock_flags_fixup(ret, target_type);
3172         if (type == SOCK_PACKET) {
3173             /* Handle an obsolete case: if the socket type is
3174              * SOCK_PACKET, bind by name.
3175              */
3176             fd_trans_register(ret, &target_packet_trans);
3177         } else if (domain == PF_NETLINK) {
3178             switch (protocol) {
3179 #ifdef CONFIG_RTNETLINK
3180             case NETLINK_ROUTE:
3181                 fd_trans_register(ret, &target_netlink_route_trans);
3182                 break;
3183 #endif
3184             case NETLINK_KOBJECT_UEVENT:
3185                 /* nothing to do: messages are strings */
3186                 break;
3187             case NETLINK_AUDIT:
3188                 fd_trans_register(ret, &target_netlink_audit_trans);
3189                 break;
3190             default:
3191                 g_assert_not_reached();
3192             }
3193         }
3194     }
3195     return ret;
3196 }
3197 
3198 /* do_bind() Must return target values and target errnos. */
3199 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3200                         socklen_t addrlen)
3201 {
3202     void *addr;
3203     abi_long ret;
3204 
3205     if ((int)addrlen < 0) {
3206         return -TARGET_EINVAL;
3207     }
3208 
3209     addr = alloca(addrlen+1);
3210 
3211     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212     if (ret)
3213         return ret;
3214 
3215     return get_errno(bind(sockfd, addr, addrlen));
3216 }
3217 
3218 /* do_connect() Must return target values and target errnos. */
3219 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3220                            socklen_t addrlen)
3221 {
3222     void *addr;
3223     abi_long ret;
3224 
3225     if ((int)addrlen < 0) {
3226         return -TARGET_EINVAL;
3227     }
3228 
3229     addr = alloca(addrlen+1);
3230 
3231     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3232     if (ret)
3233         return ret;
3234 
3235     return get_errno(safe_connect(sockfd, addr, addrlen));
3236 }
3237 
3238 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3239 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3240                                       int flags, int send)
3241 {
3242     abi_long ret, len;
3243     struct msghdr msg;
3244     abi_ulong count;
3245     struct iovec *vec;
3246     abi_ulong target_vec;
3247 
3248     if (msgp->msg_name) {
3249         msg.msg_namelen = tswap32(msgp->msg_namelen);
3250         msg.msg_name = alloca(msg.msg_namelen+1);
3251         ret = target_to_host_sockaddr(fd, msg.msg_name,
3252                                       tswapal(msgp->msg_name),
3253                                       msg.msg_namelen);
3254         if (ret == -TARGET_EFAULT) {
3255             /* For connected sockets msg_name and msg_namelen must
3256              * be ignored, so returning EFAULT immediately is wrong.
3257              * Instead, pass a bad msg_name to the host kernel, and
3258              * let it decide whether to return EFAULT or not.
3259              */
3260             msg.msg_name = (void *)-1;
3261         } else if (ret) {
3262             goto out2;
3263         }
3264     } else {
3265         msg.msg_name = NULL;
3266         msg.msg_namelen = 0;
3267     }
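    /*
     * The host control buffer is deliberately oversized (2x the target
     * length): translated cmsg headers and their alignment can be larger
     * on the host than on the target (e.g. a 64-bit host running a 32-bit
     * guest), so extra room is left for the cmsg conversion routines.
     */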
3268     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3269     msg.msg_control = alloca(msg.msg_controllen);
3270     memset(msg.msg_control, 0, msg.msg_controllen);
3271 
3272     msg.msg_flags = tswap32(msgp->msg_flags);
3273 
3274     count = tswapal(msgp->msg_iovlen);
3275     target_vec = tswapal(msgp->msg_iov);
3276 
3277     if (count > IOV_MAX) {
3278         /* sendmsg/recvmsg return a different errno for this condition than
3279          * readv/writev, so we must catch it here before lock_iovec() does.
3280          */
3281         ret = -TARGET_EMSGSIZE;
3282         goto out2;
3283     }
3284 
3285     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3286                      target_vec, count, send);
3287     if (vec == NULL) {
3288         ret = -host_to_target_errno(errno);
3289         goto out2;
3290     }
3291     msg.msg_iovlen = count;
3292     msg.msg_iov = vec;
3293 
3294     if (send) {
3295         if (fd_trans_target_to_host_data(fd)) {
3296             void *host_msg;
3297 
3298             host_msg = g_malloc(msg.msg_iov->iov_len);
3299             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3300             ret = fd_trans_target_to_host_data(fd)(host_msg,
3301                                                    msg.msg_iov->iov_len);
3302             if (ret >= 0) {
3303                 msg.msg_iov->iov_base = host_msg;
3304                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3305             }
3306             g_free(host_msg);
3307         } else {
3308             ret = target_to_host_cmsg(&msg, msgp);
3309             if (ret == 0) {
3310                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3311             }
3312         }
3313     } else {
3314         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3315         if (!is_error(ret)) {
3316             len = ret;
3317             if (fd_trans_host_to_target_data(fd)) {
3318                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3319                                                MIN(msg.msg_iov->iov_len, len));
3320             } else {
3321                 ret = host_to_target_cmsg(msgp, &msg);
3322             }
3323             if (!is_error(ret)) {
3324                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3325                 msgp->msg_flags = tswap32(msg.msg_flags);
3326                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3327                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3328                                     msg.msg_name, msg.msg_namelen);
3329                     if (ret) {
3330                         goto out;
3331                     }
3332                 }
3333 
3334                 ret = len;
3335             }
3336         }
3337     }
3338 
3339 out:
3340     unlock_iovec(vec, target_vec, count, !send);
3341 out2:
3342     return ret;
3343 }
3344 
3345 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3346                                int flags, int send)
3347 {
3348     abi_long ret;
3349     struct target_msghdr *msgp;
3350 
3351     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3352                           msgp,
3353                           target_msg,
3354                           send ? 1 : 0)) {
3355         return -TARGET_EFAULT;
3356     }
3357     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3358     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3359     return ret;
3360 }
3361 
3362 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3363  * so it might not have this *mmsg-specific flag either.
3364  */
3365 #ifndef MSG_WAITFORONE
3366 #define MSG_WAITFORONE 0x10000
3367 #endif
3368 
3369 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3370                                 unsigned int vlen, unsigned int flags,
3371                                 int send)
3372 {
3373     struct target_mmsghdr *mmsgp;
3374     abi_long ret = 0;
3375     int i;
3376 
3377     if (vlen > UIO_MAXIOV) {
3378         vlen = UIO_MAXIOV;
3379     }
3380 
3381     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3382     if (!mmsgp) {
3383         return -TARGET_EFAULT;
3384     }
3385 
3386     for (i = 0; i < vlen; i++) {
3387         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3388         if (is_error(ret)) {
3389             break;
3390         }
3391         mmsgp[i].msg_len = tswap32(ret);
3392         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3393         if (flags & MSG_WAITFORONE) {
3394             flags |= MSG_DONTWAIT;
3395         }
3396     }
3397 
3398     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3399 
3400     /* Return number of datagrams sent if we sent any at all;
3401      * otherwise return the error.
3402      */
3403     if (i) {
3404         return i;
3405     }
3406     return ret;
3407 }
3408 
3409 /* do_accept4() Must return target values and target errnos. */
3410 static abi_long do_accept4(int fd, abi_ulong target_addr,
3411                            abi_ulong target_addrlen_addr, int flags)
3412 {
3413     socklen_t addrlen, ret_addrlen;
3414     void *addr;
3415     abi_long ret;
3416     int host_flags;
3417 
3418     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3419 
3420     if (target_addr == 0) {
3421         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422     }
3423 
3424     /* linux returns EFAULT if addrlen pointer is invalid */
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483                                abi_ulong target_addrlen_addr)
3484 {
3485     socklen_t addrlen, ret_addrlen;
3486     void *addr;
3487     abi_long ret;
3488 
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515                               abi_ulong target_tab_addr)
3516 {
3517     int tab[2];
3518     abi_long ret;
3519 
3520     target_to_host_sock_type(&type);
3521 
3522     ret = get_errno(socketpair(domain, type, protocol, tab));
3523     if (!is_error(ret)) {
3524         if (put_user_s32(tab[0], target_tab_addr)
3525             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526             ret = -TARGET_EFAULT;
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_sendto() Must return target values and target errnos. */
3532 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3533                           abi_ulong target_addr, socklen_t addrlen)
3534 {
3535     void *addr;
3536     void *host_msg;
3537     void *copy_msg = NULL;
3538     abi_long ret;
3539 
3540     if ((int)addrlen < 0) {
3541         return -TARGET_EINVAL;
3542     }
3543 
3544     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3545     if (!host_msg)
3546         return -TARGET_EFAULT;
3547     if (fd_trans_target_to_host_data(fd)) {
3548         copy_msg = host_msg;
3549         host_msg = g_malloc(len);
3550         memcpy(host_msg, copy_msg, len);
3551         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3552         if (ret < 0) {
3553             goto fail;
3554         }
3555     }
3556     if (target_addr) {
3557         addr = alloca(addrlen+1);
3558         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3559         if (ret) {
3560             goto fail;
3561         }
3562         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3563     } else {
3564         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3565     }
3566 fail:
3567     if (copy_msg) {
3568         g_free(host_msg);
3569         host_msg = copy_msg;
3570     }
3571     unlock_user(host_msg, msg, 0);
3572     return ret;
3573 }
3574 
3575 /* do_recvfrom() Must return target values and target errnos. */
3576 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3577                             abi_ulong target_addr,
3578                             abi_ulong target_addrlen)
3579 {
3580     socklen_t addrlen, ret_addrlen;
3581     void *addr;
3582     void *host_msg;
3583     abi_long ret;
3584 
3585     if (!msg) {
3586         host_msg = NULL;
3587     } else {
3588         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3589         if (!host_msg) {
3590             return -TARGET_EFAULT;
3591         }
3592     }
3593     if (target_addr) {
3594         if (get_user_u32(addrlen, target_addrlen)) {
3595             ret = -TARGET_EFAULT;
3596             goto fail;
3597         }
3598         if ((int)addrlen < 0) {
3599             ret = -TARGET_EINVAL;
3600             goto fail;
3601         }
3602         addr = alloca(addrlen);
3603         ret_addrlen = addrlen;
3604         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3605                                       addr, &ret_addrlen));
3606     } else {
3607         addr = NULL; /* To keep compiler quiet.  */
3608         addrlen = 0; /* To keep compiler quiet.  */
3609         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3610     }
3611     if (!is_error(ret)) {
3612         if (fd_trans_host_to_target_data(fd)) {
3613             abi_long trans;
3614             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3615             if (is_error(trans)) {
3616                 ret = trans;
3617                 goto fail;
3618             }
3619         }
3620         if (target_addr) {
3621             host_to_target_sockaddr(target_addr, addr,
3622                                     MIN(addrlen, ret_addrlen));
3623             if (put_user_u32(ret_addrlen, target_addrlen)) {
3624                 ret = -TARGET_EFAULT;
3625                 goto fail;
3626             }
3627         }
3628         unlock_user(host_msg, msg, len);
3629     } else {
3630 fail:
3631         unlock_user(host_msg, msg, 0);
3632     }
3633     return ret;
3634 }
3635 
3636 #ifdef TARGET_NR_socketcall
3637 /* do_socketcall() must return target values and target errnos. */
3638 static abi_long do_socketcall(int num, abi_ulong vptr)
3639 {
3640     static const unsigned nargs[] = { /* number of arguments per operation */
3641         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3642         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3643         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3644         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3645         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3646         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3647         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3648         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3649         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3650         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3651         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3652         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3653         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3654         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3655         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3656         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3657         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3658         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3659         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3660         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3661     };
3662     abi_long a[6]; /* max 6 args */
3663     unsigned i;
3664 
3665     /* check the range of the first argument num */
3666     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3667     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3668         return -TARGET_EINVAL;
3669     }
3670     /* ensure we have space for args */
3671     if (nargs[num] > ARRAY_SIZE(a)) {
3672         return -TARGET_EINVAL;
3673     }
3674     /* collect the arguments in a[] according to nargs[] */
3675     for (i = 0; i < nargs[num]; ++i) {
3676         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3677             return -TARGET_EFAULT;
3678         }
3679     }
3680     /* now when we have the args, invoke the appropriate underlying function */
3681     switch (num) {
3682     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3683         return do_socket(a[0], a[1], a[2]);
3684     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3685         return do_bind(a[0], a[1], a[2]);
3686     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3687         return do_connect(a[0], a[1], a[2]);
3688     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3689         return get_errno(listen(a[0], a[1]));
3690     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3691         return do_accept4(a[0], a[1], a[2], 0);
3692     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3693         return do_getsockname(a[0], a[1], a[2]);
3694     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3695         return do_getpeername(a[0], a[1], a[2]);
3696     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3697         return do_socketpair(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3699         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3700     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3701         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3702     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3703         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3704     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3705         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3706     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3707         return get_errno(shutdown(a[0], a[1]));
3708     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3709         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3710     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3711         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3712     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3713         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3714     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3715         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3716     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3717         return do_accept4(a[0], a[1], a[2], a[3]);
3718     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3719         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3720     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3721         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3722     default:
3723         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3724         return -TARGET_EINVAL;
3725     }
3726 }
3727 #endif
3728 
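/*
 * Book-keeping for guest shmat() attachments: do_shmat() records the guest
 * address and size of each attached segment here so that do_shmdt() can
 * clear the corresponding guest page flags when the segment is detached.
 * Only N_SHM_REGIONS concurrent attachments are tracked.
 */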
3729 #define N_SHM_REGIONS 32
3730 
3731 static struct shm_region {
3732     abi_ulong start;
3733     abi_ulong size;
3734     bool in_use;
3735 } shm_regions[N_SHM_REGIONS];
3736 
3737 #ifndef TARGET_SEMID64_DS
3738 /* asm-generic version of this struct */
3739 struct target_semid64_ds
3740 {
3741   struct target_ipc_perm sem_perm;
3742   abi_ulong sem_otime;
3743 #if TARGET_ABI_BITS == 32
3744   abi_ulong __unused1;
3745 #endif
3746   abi_ulong sem_ctime;
3747 #if TARGET_ABI_BITS == 32
3748   abi_ulong __unused2;
3749 #endif
3750   abi_ulong sem_nsems;
3751   abi_ulong __unused3;
3752   abi_ulong __unused4;
3753 };
3754 #endif
3755 
3756 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3757                                                abi_ulong target_addr)
3758 {
3759     struct target_ipc_perm *target_ip;
3760     struct target_semid64_ds *target_sd;
3761 
3762     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3763         return -TARGET_EFAULT;
3764     target_ip = &(target_sd->sem_perm);
3765     host_ip->__key = tswap32(target_ip->__key);
3766     host_ip->uid = tswap32(target_ip->uid);
3767     host_ip->gid = tswap32(target_ip->gid);
3768     host_ip->cuid = tswap32(target_ip->cuid);
3769     host_ip->cgid = tswap32(target_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771     host_ip->mode = tswap32(target_ip->mode);
3772 #else
3773     host_ip->mode = tswap16(target_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776     host_ip->__seq = tswap32(target_ip->__seq);
3777 #else
3778     host_ip->__seq = tswap16(target_ip->__seq);
3779 #endif
3780     unlock_user_struct(target_sd, target_addr, 0);
3781     return 0;
3782 }
3783 
3784 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3785                                                struct ipc_perm *host_ip)
3786 {
3787     struct target_ipc_perm *target_ip;
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3791         return -TARGET_EFAULT;
3792     target_ip = &(target_sd->sem_perm);
3793     target_ip->__key = tswap32(host_ip->__key);
3794     target_ip->uid = tswap32(host_ip->uid);
3795     target_ip->gid = tswap32(host_ip->gid);
3796     target_ip->cuid = tswap32(host_ip->cuid);
3797     target_ip->cgid = tswap32(host_ip->cgid);
3798 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3799     target_ip->mode = tswap32(host_ip->mode);
3800 #else
3801     target_ip->mode = tswap16(host_ip->mode);
3802 #endif
3803 #if defined(TARGET_PPC)
3804     target_ip->__seq = tswap32(host_ip->__seq);
3805 #else
3806     target_ip->__seq = tswap16(host_ip->__seq);
3807 #endif
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813                                                abi_ulong target_addr)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818         return -TARGET_EFAULT;
3819     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820         return -TARGET_EFAULT;
3821     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 0);
3825     return 0;
3826 }
3827 
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829                                                struct semid_ds *host_sd)
3830 {
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836         return -TARGET_EFAULT;
3837     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840     unlock_user_struct(target_sd, target_addr, 1);
3841     return 0;
3842 }
3843 
3844 struct target_seminfo {
3845     int semmap;
3846     int semmni;
3847     int semmns;
3848     int semmnu;
3849     int semmsl;
3850     int semopm;
3851     int semume;
3852     int semusz;
3853     int semvmx;
3854     int semaem;
3855 };
3856 
3857 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3858                                               struct seminfo *host_seminfo)
3859 {
3860     struct target_seminfo *target_seminfo;
3861     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3862         return -TARGET_EFAULT;
3863     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3864     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3865     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3866     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3867     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3868     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3869     __put_user(host_seminfo->semume, &target_seminfo->semume);
3870     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3871     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3872     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3873     unlock_user_struct(target_seminfo, target_addr, 1);
3874     return 0;
3875 }
3876 
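/*
 * Two views of the semctl() argument: "union semun" carries real host
 * pointers for the host semctl() call, while "union target_semun" mirrors
 * the guest layout, in which buf/array/__buf are guest addresses
 * (abi_ulong) fetched from guest memory.  do_semctl() below converts
 * between the two for each command.
 */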
3877 union semun {
3878     int val;
3879     struct semid_ds *buf;
3880     unsigned short *array;
3881     struct seminfo *__buf;
3882 };
3883 
3884 union target_semun {
3885     int val;
3886     abi_ulong buf;
3887     abi_ulong array;
3888     abi_ulong __buf;
3889 };
3890 
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892                                                abi_ulong target_addr)
3893 {
3894     int nsems;
3895     unsigned short *array;
3896     union semun semun;
3897     struct semid_ds semid_ds;
3898     int i, ret;
3899 
3900     semun.buf = &semid_ds;
3901 
3902     ret = semctl(semid, 0, IPC_STAT, semun);
3903     if (ret == -1)
3904         return get_errno(ret);
3905 
3906     nsems = semid_ds.sem_nsems;
3907 
3908     *host_array = g_try_new(unsigned short, nsems);
3909     if (!*host_array) {
3910         return -TARGET_ENOMEM;
3911     }
3912     array = lock_user(VERIFY_READ, target_addr,
3913                       nsems*sizeof(unsigned short), 1);
3914     if (!array) {
3915         g_free(*host_array);
3916         return -TARGET_EFAULT;
3917     }
3918 
3919     for (i = 0; i < nsems; i++) {
3920         __get_user((*host_array)[i], &array[i]);
3921     }
3922     unlock_user(array, target_addr, 0);
3923 
3924     return 0;
3925 }
3926 
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928                                                unsigned short **host_array)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     array = lock_user(VERIFY_WRITE, target_addr,
3945                       nsems*sizeof(unsigned short), 0);
3946     if (!array)
3947         return -TARGET_EFAULT;
3948 
3949     for (i = 0; i < nsems; i++) {
3950         __put_user((*host_array)[i], &array[i]);
3951     }
3952     g_free(*host_array);
3953     unlock_user(array, target_addr, 1);
3954 
3955     return 0;
3956 }
3957 
3958 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3959                                  abi_ulong target_arg)
3960 {
3961     union target_semun target_su = { .buf = target_arg };
3962     union semun arg;
3963     struct semid_ds dsarg;
3964     unsigned short *array = NULL;
3965     struct seminfo seminfo;
3966     abi_long ret = -TARGET_EINVAL;
3967     abi_long err;
3968     cmd &= 0xff;
3969 
3970     switch (cmd) {
3971     case GETVAL:
3972     case SETVAL:
3973         /* In 64 bit cross-endian situations, we will erroneously pick up
3974          * the wrong half of the union for the "val" element.  To rectify
3975          * this, the entire 8-byte structure is byteswapped, followed by
3976          * a swap of the 4 byte val field. In other cases, the data is
3977          * already in proper host byte order. */
3978         if (sizeof(target_su.val) != sizeof(target_su.buf)) {
3979             target_su.buf = tswapal(target_su.buf);
3980             arg.val = tswap32(target_su.val);
3981         } else {
3982             arg.val = target_su.val;
3983         }
3984         ret = get_errno(semctl(semid, semnum, cmd, arg));
3985         break;
3986     case GETALL:
3987     case SETALL:
3988         err = target_to_host_semarray(semid, &array, target_su.array);
3989         if (err)
3990             return err;
3991         arg.array = array;
3992         ret = get_errno(semctl(semid, semnum, cmd, arg));
3993         err = host_to_target_semarray(semid, target_su.array, &array);
3994         if (err)
3995             return err;
3996         break;
3997     case IPC_STAT:
3998     case IPC_SET:
3999     case SEM_STAT:
4000         err = target_to_host_semid_ds(&dsarg, target_su.buf);
4001         if (err)
4002             return err;
4003         arg.buf = &dsarg;
4004         ret = get_errno(semctl(semid, semnum, cmd, arg));
4005         err = host_to_target_semid_ds(target_su.buf, &dsarg);
4006         if (err)
4007             return err;
4008         break;
4009     case IPC_INFO:
4010     case SEM_INFO:
4011         arg.__buf = &seminfo;
4012         ret = get_errno(semctl(semid, semnum, cmd, arg));
4013         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4014         if (err)
4015             return err;
4016         break;
4017     case IPC_RMID:
4018     case GETPID:
4019     case GETNCNT:
4020     case GETZCNT:
4021         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4022         break;
4023     }
4024 
4025     return ret;
4026 }
4027 
4028 struct target_sembuf {
4029     unsigned short sem_num;
4030     short sem_op;
4031     short sem_flg;
4032 };
4033 
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035                                              abi_ulong target_addr,
4036                                              unsigned nsops)
4037 {
4038     struct target_sembuf *target_sembuf;
4039     int i;
4040 
4041     target_sembuf = lock_user(VERIFY_READ, target_addr,
4042                               nsops*sizeof(struct target_sembuf), 1);
4043     if (!target_sembuf)
4044         return -TARGET_EFAULT;
4045 
4046     for (i = 0; i < nsops; i++) {
4047         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050     }
4051 
4052     unlock_user(target_sembuf, target_addr, 0);
4053 
4054     return 0;
4055 }
4056 
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059 
4060 /*
4061  * This macro is required to handle the s390 variants, which pass the
4062  * arguments in a different order from the default.
4063  */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066   (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069   (__nsops), 0, (__sops), (__timeout)
4070 #endif
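/*
 * For illustration, the fallback call in do_semtimedop() below,
 *   safe_ipc(IPCOP_semtimedop, semid, SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 * expands on the generic variant to
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 * and on s390x to the five-argument form
 *   safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 * where the timeout travels in the third sys_ipc parameter instead of
 * the fifth.
 */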
4071 
4072 static inline abi_long do_semtimedop(int semid,
4073                                      abi_long ptr,
4074                                      unsigned nsops,
4075                                      abi_long timeout, bool time64)
4076 {
4077     struct sembuf *sops;
4078     struct timespec ts, *pts = NULL;
4079     abi_long ret;
4080 
4081     if (timeout) {
4082         pts = &ts;
4083         if (time64) {
4084             if (target_to_host_timespec64(pts, timeout)) {
4085                 return -TARGET_EFAULT;
4086             }
4087         } else {
4088             if (target_to_host_timespec(pts, timeout)) {
4089                 return -TARGET_EFAULT;
4090             }
4091         }
4092     }
4093 
4094     if (nsops > TARGET_SEMOPM) {
4095         return -TARGET_E2BIG;
4096     }
4097 
4098     sops = g_new(struct sembuf, nsops);
4099 
4100     if (target_to_host_sembuf(sops, ptr, nsops)) {
4101         g_free(sops);
4102         return -TARGET_EFAULT;
4103     }
4104 
4105     ret = -TARGET_ENOSYS;
4106 #ifdef __NR_semtimedop
4107     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4108 #endif
4109 #ifdef __NR_ipc
4110     if (ret == -TARGET_ENOSYS) {
4111         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4112                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4113     }
4114 #endif
4115     g_free(sops);
4116     return ret;
4117 }
4118 #endif
4119 
4120 struct target_msqid_ds
4121 {
4122     struct target_ipc_perm msg_perm;
4123     abi_ulong msg_stime;
4124 #if TARGET_ABI_BITS == 32
4125     abi_ulong __unused1;
4126 #endif
4127     abi_ulong msg_rtime;
4128 #if TARGET_ABI_BITS == 32
4129     abi_ulong __unused2;
4130 #endif
4131     abi_ulong msg_ctime;
4132 #if TARGET_ABI_BITS == 32
4133     abi_ulong __unused3;
4134 #endif
4135     abi_ulong __msg_cbytes;
4136     abi_ulong msg_qnum;
4137     abi_ulong msg_qbytes;
4138     abi_ulong msg_lspid;
4139     abi_ulong msg_lrpid;
4140     abi_ulong __unused4;
4141     abi_ulong __unused5;
4142 };
4143 
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145                                                abi_ulong target_addr)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150         return -TARGET_EFAULT;
4151     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152         return -TARGET_EFAULT;
4153     host_md->msg_stime = tswapal(target_md->msg_stime);
4154     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 0);
4162     return 0;
4163 }
4164 
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166                                                struct msqid_ds *host_md)
4167 {
4168     struct target_msqid_ds *target_md;
4169 
4170     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173         return -TARGET_EFAULT;
4174     target_md->msg_stime = tswapal(host_md->msg_stime);
4175     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182     unlock_user_struct(target_md, target_addr, 1);
4183     return 0;
4184 }
4185 
4186 struct target_msginfo {
4187     int msgpool;
4188     int msgmap;
4189     int msgmax;
4190     int msgmnb;
4191     int msgmni;
4192     int msgssz;
4193     int msgtql;
4194     unsigned short int msgseg;
4195 };
4196 
4197 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4198                                               struct msginfo *host_msginfo)
4199 {
4200     struct target_msginfo *target_msginfo;
4201     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4202         return -TARGET_EFAULT;
4203     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4204     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4205     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4206     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4207     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4208     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4209     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4210     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4211     unlock_user_struct(target_msginfo, target_addr, 1);
4212     return 0;
4213 }
4214 
4215 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4216 {
4217     struct msqid_ds dsarg;
4218     struct msginfo msginfo;
4219     abi_long ret = -TARGET_EINVAL;
4220 
4221     cmd &= 0xff;
4222 
4223     switch (cmd) {
4224     case IPC_STAT:
4225     case IPC_SET:
4226     case MSG_STAT:
4227         if (target_to_host_msqid_ds(&dsarg,ptr))
4228             return -TARGET_EFAULT;
4229         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4230         if (host_to_target_msqid_ds(ptr,&dsarg))
4231             return -TARGET_EFAULT;
4232         break;
4233     case IPC_RMID:
4234         ret = get_errno(msgctl(msgid, cmd, NULL));
4235         break;
4236     case IPC_INFO:
4237     case MSG_INFO:
4238         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4239         if (host_to_target_msginfo(ptr, &msginfo))
4240             return -TARGET_EFAULT;
4241         break;
4242     }
4243 
4244     return ret;
4245 }
4246 
4247 struct target_msgbuf {
4248     abi_long mtype;
4249     char mtext[1];
4250 };
4251 
4252 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4253                                  ssize_t msgsz, int msgflg)
4254 {
4255     struct target_msgbuf *target_mb;
4256     struct msgbuf *host_mb;
4257     abi_long ret = 0;
4258 
4259     if (msgsz < 0) {
4260         return -TARGET_EINVAL;
4261     }
4262 
4263     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4264         return -TARGET_EFAULT;
4265     host_mb = g_try_malloc(msgsz + sizeof(long));
4266     if (!host_mb) {
4267         unlock_user_struct(target_mb, msgp, 0);
4268         return -TARGET_ENOMEM;
4269     }
4270     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4271     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4272     ret = -TARGET_ENOSYS;
4273 #ifdef __NR_msgsnd
4274     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4275 #endif
4276 #ifdef __NR_ipc
4277     if (ret == -TARGET_ENOSYS) {
4278 #ifdef __s390x__
4279         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4280                                  host_mb));
4281 #else
4282         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4283                                  host_mb, 0));
4284 #endif
4285     }
4286 #endif
4287     g_free(host_mb);
4288     unlock_user_struct(target_mb, msgp, 0);
4289 
4290     return ret;
4291 }
4292 
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters.  */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300     ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303     ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
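/*
 * For illustration, with the generic definition the fallback call in
 * do_msgrcv() below,
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            MSGRCV_ARGS(host_mb, msgtyp))
 * expands to
 *   safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *            ((long int[]){(long int)host_mb, msgtyp}), 0)
 * i.e. msgp and msgtyp are packed into a two-element array passed through
 * a single pointer argument (the historical sys_ipc kludge), while SPARC
 * passes the two values directly and s390x omits the trailing argument.
 */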
4306 
4307 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4308                                  ssize_t msgsz, abi_long msgtyp,
4309                                  int msgflg)
4310 {
4311     struct target_msgbuf *target_mb;
4312     char *target_mtext;
4313     struct msgbuf *host_mb;
4314     abi_long ret = 0;
4315 
4316     if (msgsz < 0) {
4317         return -TARGET_EINVAL;
4318     }
4319 
4320     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4321         return -TARGET_EFAULT;
4322 
4323     host_mb = g_try_malloc(msgsz + sizeof(long));
4324     if (!host_mb) {
4325         ret = -TARGET_ENOMEM;
4326         goto end;
4327     }
4328     ret = -TARGET_ENOSYS;
4329 #ifdef __NR_msgrcv
4330     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4331 #endif
4332 #ifdef __NR_ipc
4333     if (ret == -TARGET_ENOSYS) {
4334         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4335                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4336     }
4337 #endif
4338 
4339     if (ret > 0) {
4340         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4341         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4342         if (!target_mtext) {
4343             ret = -TARGET_EFAULT;
4344             goto end;
4345         }
4346         memcpy(target_mb->mtext, host_mb->mtext, ret);
4347         unlock_user(target_mtext, target_mtext_addr, ret);
4348     }
4349 
4350     target_mb->mtype = tswapal(host_mb->mtype);
4351 
4352 end:
4353     if (target_mb)
4354         unlock_user_struct(target_mb, msgp, 1);
4355     g_free(host_mb);
4356     return ret;
4357 }
4358 
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360                                                abi_ulong target_addr)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365         return -TARGET_EFAULT;
4366     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367         return -TARGET_EFAULT;
4368     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380                                                struct shmid_ds *host_sd)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387         return -TARGET_EFAULT;
4388     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 1);
4396     return 0;
4397 }
4398 
4399 struct  target_shminfo {
4400     abi_ulong shmmax;
4401     abi_ulong shmmin;
4402     abi_ulong shmmni;
4403     abi_ulong shmseg;
4404     abi_ulong shmall;
4405 };
4406 
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408                                               struct shminfo *host_shminfo)
4409 {
4410     struct target_shminfo *target_shminfo;
4411     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412         return -TARGET_EFAULT;
4413     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418     unlock_user_struct(target_shminfo, target_addr, 1);
4419     return 0;
4420 }
4421 
4422 struct target_shm_info {
4423     int used_ids;
4424     abi_ulong shm_tot;
4425     abi_ulong shm_rss;
4426     abi_ulong shm_swp;
4427     abi_ulong swap_attempts;
4428     abi_ulong swap_successes;
4429 };
4430 
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432                                                struct shm_info *host_shm_info)
4433 {
4434     struct target_shm_info *target_shm_info;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443     unlock_user_struct(target_shm_info, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449     struct shmid_ds dsarg;
4450     struct shminfo shminfo;
4451     struct shm_info shm_info;
4452     abi_long ret = -TARGET_EINVAL;
4453 
4454     cmd &= 0xff;
4455 
4456     switch (cmd) {
4457     case IPC_STAT:
4458     case IPC_SET:
4459     case SHM_STAT:
4460         if (target_to_host_shmid_ds(&dsarg, buf))
4461             return -TARGET_EFAULT;
4462         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463         if (host_to_target_shmid_ds(buf, &dsarg))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468         if (host_to_target_shminfo(buf, &shminfo))
4469             return -TARGET_EFAULT;
4470         break;
4471     case SHM_INFO:
4472         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473         if (host_to_target_shm_info(buf, &shm_info))
4474             return -TARGET_EFAULT;
4475         break;
4476     case IPC_RMID:
4477     case SHM_LOCK:
4478     case SHM_UNLOCK:
4479         ret = get_errno(shmctl(shmid, cmd, NULL));
4480         break;
4481     }
4482 
4483     return ret;
4484 }
4485 
4486 #ifndef TARGET_FORCE_SHMLBA
4487 /* For most architectures, SHMLBA is the same as the page size;
4488  * some architectures have larger values, in which case they should
4489  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4490  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4491  * and defining its own value for SHMLBA.
4492  *
4493  * The kernel also permits SHMLBA to be set by the architecture to a
4494  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4495  * this means that addresses are rounded to the large size if
4496  * SHM_RND is set but addresses not aligned to that size are not rejected
4497  * as long as they are at least page-aligned. Since the only architecture
4498  * which uses this is ia64 this code doesn't provide for that oddity.
4499  */
4500 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4501 {
4502     return TARGET_PAGE_SIZE;
4503 }
4504 #endif
4505 
4506 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4507                                  int shmid, abi_ulong shmaddr, int shmflg)
4508 {
4509     CPUState *cpu = env_cpu(cpu_env);
4510     abi_long raddr;
4511     void *host_raddr;
4512     struct shmid_ds shm_info;
4513     int i, ret;
4514     abi_ulong shmlba;
4515 
4516     /* shmat pointers are always untagged */
4517 
4518     /* find out the length of the shared memory segment */
4519     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4520     if (is_error(ret)) {
4521         /* can't get length, bail out */
4522         return ret;
4523     }
4524 
4525     shmlba = target_shmlba(cpu_env);
4526 
4527     if (shmaddr & (shmlba - 1)) {
4528         if (shmflg & SHM_RND) {
4529             shmaddr &= ~(shmlba - 1);
4530         } else {
4531             return -TARGET_EINVAL;
4532         }
4533     }
4534     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4535         return -TARGET_EINVAL;
4536     }
4537 
4538     mmap_lock();
4539 
4540     /*
4541      * We're mapping shared memory, so ensure we generate code for parallel
4542      * execution and flush old translations.  This will work up to the level
4543      * supported by the host -- anything that requires EXCP_ATOMIC will not
4544      * be atomic with respect to an external process.
4545      */
4546     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4547         cpu->tcg_cflags |= CF_PARALLEL;
4548         tb_flush(cpu);
4549     }
4550 
4551     if (shmaddr)
4552         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4553     else {
4554         abi_ulong mmap_start;
4555 
4556         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4557         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4558 
4559         if (mmap_start == -1) {
4560             errno = ENOMEM;
4561             host_raddr = (void *)-1;
4562         } else
4563             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4564                                shmflg | SHM_REMAP);
4565     }
4566 
4567     if (host_raddr == (void *)-1) {
4568         mmap_unlock();
4569         return get_errno((long)host_raddr);
4570     }
4571     raddr = h2g((unsigned long)host_raddr);
4572 
4573     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4574                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4575                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4576 
4577     for (i = 0; i < N_SHM_REGIONS; i++) {
4578         if (!shm_regions[i].in_use) {
4579             shm_regions[i].in_use = true;
4580             shm_regions[i].start = raddr;
4581             shm_regions[i].size = shm_info.shm_segsz;
4582             break;
4583         }
4584     }
4585 
4586     mmap_unlock();
4587     return raddr;
4588 
4589 }
4590 
4591 static inline abi_long do_shmdt(abi_ulong shmaddr)
4592 {
4593     int i;
4594     abi_long rv;
4595 
4596     /* shmdt pointers are always untagged */
4597 
4598     mmap_lock();
4599 
4600     for (i = 0; i < N_SHM_REGIONS; ++i) {
4601         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4602             shm_regions[i].in_use = false;
4603             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4604             break;
4605         }
4606     }
4607     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4608 
4609     mmap_unlock();
4610 
4611     return rv;
4612 }
4613 
4614 #ifdef TARGET_NR_ipc
4615 /* ??? This only works with linear mappings.  */
4616 /* do_ipc() must return target values and target errnos. */
4617 static abi_long do_ipc(CPUArchState *cpu_env,
4618                        unsigned int call, abi_long first,
4619                        abi_long second, abi_long third,
4620                        abi_long ptr, abi_long fifth)
4621 {
4622     int version;
4623     abi_long ret = 0;
4624 
4625     version = call >> 16;
4626     call &= 0xffff;
4627 
4628     switch (call) {
4629     case IPCOP_semop:
4630         ret = do_semtimedop(first, ptr, second, 0, false);
4631         break;
4632     case IPCOP_semtimedop:
4633     /*
4634      * The s390 sys_ipc variant has only five parameters instead of six
4635      * (as in the default variant); the only difference is the handling of
4636      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4637      * to a struct timespec while the generic variant uses the fifth parameter.
4638      */
4639 #if defined(TARGET_S390X)
4640         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4641 #else
4642         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4643 #endif
4644         break;
4645 
4646     case IPCOP_semget:
4647         ret = get_errno(semget(first, second, third));
4648         break;
4649 
4650     case IPCOP_semctl: {
4651         /* The semun argument to semctl is passed by value, so dereference the
4652          * ptr argument. */
4653         abi_ulong atptr;
4654         get_user_ual(atptr, ptr);
4655         ret = do_semctl(first, second, third, atptr);
4656         break;
4657     }
4658 
4659     case IPCOP_msgget:
4660         ret = get_errno(msgget(first, second));
4661         break;
4662 
4663     case IPCOP_msgsnd:
4664         ret = do_msgsnd(first, ptr, second, third);
4665         break;
4666 
4667     case IPCOP_msgctl:
4668         ret = do_msgctl(first, second, ptr);
4669         break;
4670 
4671     case IPCOP_msgrcv:
4672         switch (version) {
4673         case 0:
4674             {
4675                 struct target_ipc_kludge {
4676                     abi_long msgp;
4677                     abi_long msgtyp;
4678                 } *tmp;
4679 
4680                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4681                     ret = -TARGET_EFAULT;
4682                     break;
4683                 }
4684 
4685                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4686 
4687                 unlock_user_struct(tmp, ptr, 0);
4688                 break;
4689             }
4690         default:
4691             ret = do_msgrcv(first, ptr, second, fifth, third);
4692         }
4693         break;
4694 
4695     case IPCOP_shmat:
4696         switch (version) {
4697         default:
4698         {
4699             abi_ulong raddr;
4700             raddr = do_shmat(cpu_env, first, ptr, second);
4701             if (is_error(raddr))
4702                 return get_errno(raddr);
4703             if (put_user_ual(raddr, third))
4704                 return -TARGET_EFAULT;
4705             break;
4706         }
4707         case 1:
4708             ret = -TARGET_EINVAL;
4709             break;
4710         }
4711         break;
4712     case IPCOP_shmdt:
4713         ret = do_shmdt(ptr);
4714         break;
4715 
4716     case IPCOP_shmget:
4717         /* IPC_* flag values are the same on all linux platforms */
4718         ret = get_errno(shmget(first, second, third));
4719         break;
4720 
4721     /* IPC_* and SHM_* command values are the same on all linux platforms */
4722     case IPCOP_shmctl:
4723         ret = do_shmctl(first, second, ptr);
4724         break;
4725     default:
4726         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4727                       call, version);
4728         ret = -TARGET_ENOSYS;
4729         break;
4730     }
4731     return ret;
4732 }
4733 #endif
4734 
4735 /* kernel structure types definitions */
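/*
 * syscall_types.h is included twice with different definitions of
 * STRUCT(): the first pass builds the STRUCT_* enum, the second emits a
 * struct_*_def argtype array for each structure, for use by the thunk
 * conversion code.
 */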
4736 
4737 #define STRUCT(name, ...) STRUCT_ ## name,
4738 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4739 enum {
4740 #include "syscall_types.h"
4741 STRUCT_MAX
4742 };
4743 #undef STRUCT
4744 #undef STRUCT_SPECIAL
4745 
4746 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4747 #define STRUCT_SPECIAL(name)
4748 #include "syscall_types.h"
4749 #undef STRUCT
4750 #undef STRUCT_SPECIAL
4751 
4752 #define MAX_STRUCT_SIZE 4096
4753 
4754 #ifdef CONFIG_FIEMAP
4755 /* So fiemap access checks don't overflow on 32 bit systems.
4756  * This is very slightly smaller than the limit imposed by
4757  * the underlying kernel.
4758  */
4759 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4760                             / sizeof(struct fiemap_extent))
4761 
4762 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4763                                        int fd, int cmd, abi_long arg)
4764 {
4765     /* The parameter for this ioctl is a struct fiemap followed
4766      * by an array of struct fiemap_extent whose size is set
4767      * in fiemap->fm_extent_count. The array is filled in by the
4768      * ioctl.
4769      */
4770     int target_size_in, target_size_out;
4771     struct fiemap *fm;
4772     const argtype *arg_type = ie->arg_type;
4773     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4774     void *argptr, *p;
4775     abi_long ret;
4776     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4777     uint32_t outbufsz;
4778     int free_fm = 0;
4779 
4780     assert(arg_type[0] == TYPE_PTR);
4781     assert(ie->access == IOC_RW);
4782     arg_type++;
4783     target_size_in = thunk_type_size(arg_type, 0);
4784     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4785     if (!argptr) {
4786         return -TARGET_EFAULT;
4787     }
4788     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4789     unlock_user(argptr, arg, 0);
4790     fm = (struct fiemap *)buf_temp;
4791     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4792         return -TARGET_EINVAL;
4793     }
4794 
4795     outbufsz = sizeof (*fm) +
4796         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4797 
4798     if (outbufsz > MAX_STRUCT_SIZE) {
4799         /* We can't fit all the extents into the fixed size buffer.
4800          * Allocate one that is large enough and use it instead.
4801          */
4802         fm = g_try_malloc(outbufsz);
4803         if (!fm) {
4804             return -TARGET_ENOMEM;
4805         }
4806         memcpy(fm, buf_temp, sizeof(struct fiemap));
4807         free_fm = 1;
4808     }
4809     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4810     if (!is_error(ret)) {
4811         target_size_out = target_size_in;
4812         /* An extent_count of 0 means we were only counting the extents
4813          * so there are no structs to copy
4814          */
4815         if (fm->fm_extent_count != 0) {
4816             target_size_out += fm->fm_mapped_extents * extent_size;
4817         }
4818         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4819         if (!argptr) {
4820             ret = -TARGET_EFAULT;
4821         } else {
4822             /* Convert the struct fiemap */
4823             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4824             if (fm->fm_extent_count != 0) {
4825                 p = argptr + target_size_in;
4826                 /* ...and then all the struct fiemap_extents */
4827                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4828                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4829                                   THUNK_TARGET);
4830                     p += extent_size;
4831                 }
4832             }
4833             unlock_user(argptr, arg, target_size_out);
4834         }
4835     }
4836     if (free_fm) {
4837         g_free(fm);
4838     }
4839     return ret;
4840 }
4841 #endif
4842 
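/*
 * SIOCGIFCONF: struct ifconf embeds a pointer to an array of struct
 * ifreq, so both the outer structure and each array element have to be
 * converted between target and host layouts, with ifc_len translated
 * accordingly.
 */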
4843 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4844                                 int fd, int cmd, abi_long arg)
4845 {
4846     const argtype *arg_type = ie->arg_type;
4847     int target_size;
4848     void *argptr;
4849     int ret;
4850     struct ifconf *host_ifconf;
4851     uint32_t outbufsz;
4852     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4853     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4854     int target_ifreq_size;
4855     int nb_ifreq;
4856     int free_buf = 0;
4857     int i;
4858     int target_ifc_len;
4859     abi_long target_ifc_buf;
4860     int host_ifc_len;
4861     char *host_ifc_buf;
4862 
4863     assert(arg_type[0] == TYPE_PTR);
4864     assert(ie->access == IOC_RW);
4865 
4866     arg_type++;
4867     target_size = thunk_type_size(arg_type, 0);
4868 
4869     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4870     if (!argptr)
4871         return -TARGET_EFAULT;
4872     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4873     unlock_user(argptr, arg, 0);
4874 
4875     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4876     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4877     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4878 
4879     if (target_ifc_buf != 0) {
4880         target_ifc_len = host_ifconf->ifc_len;
4881         nb_ifreq = target_ifc_len / target_ifreq_size;
4882         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4883 
4884         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4885         if (outbufsz > MAX_STRUCT_SIZE) {
4886             /*
4887              * We can't fit all the ifreq entries into the fixed size buffer.
4888              * Allocate one that is large enough and use it instead.
4889              */
4890             host_ifconf = g_try_malloc(outbufsz);
4891             if (!host_ifconf) {
4892                 return -TARGET_ENOMEM;
4893             }
4894             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4895             free_buf = 1;
4896         }
4897         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4898 
4899         host_ifconf->ifc_len = host_ifc_len;
4900     } else {
4901         host_ifc_buf = NULL;
4902     }
4903     host_ifconf->ifc_buf = host_ifc_buf;
4904 
4905     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4906     if (!is_error(ret)) {
4907         /* convert host ifc_len to target ifc_len */
4908 
4909         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4910         target_ifc_len = nb_ifreq * target_ifreq_size;
4911         host_ifconf->ifc_len = target_ifc_len;
4912 
4913         /* restore target ifc_buf */
4914 
4915         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4916 
4917         /* copy struct ifconf to target user */
4918 
4919         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4920         if (!argptr)
4921             return -TARGET_EFAULT;
4922         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4923         unlock_user(argptr, arg, target_size);
4924 
4925         if (target_ifc_buf != 0) {
4926             /* copy ifreq[] to target user */
4927             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4928             for (i = 0; i < nb_ifreq ; i++) {
4929                 thunk_convert(argptr + i * target_ifreq_size,
4930                               host_ifc_buf + i * sizeof(struct ifreq),
4931                               ifreq_arg_type, THUNK_TARGET);
4932             }
4933             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4934         }
4935     }
4936 
4937     if (free_buf) {
4938         g_free(host_ifconf);
4939     }
4940 
4941     return ret;
4942 }
4943 
4944 #if defined(CONFIG_USBFS)
4945 #if HOST_LONG_BITS > 64
4946 #error USBDEVFS thunks do not support >64 bit hosts yet.
4947 #endif
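/*
 * Book-keeping for asynchronous USBDEVFS URBs.  Each submitted URB gets a
 * struct live_urb holding the host URB handed to the kernel together with
 * the guest addresses it was built from; the guest URB address in the
 * first field doubles as the key of the hash table below, so that
 * USBDEVFS_DISCARDURB can map a guest address back to the host copy and
 * USBDEVFS_REAPURB can drop the entry once the URB completes.
 */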
4948 struct live_urb {
4949     uint64_t target_urb_adr;
4950     uint64_t target_buf_adr;
4951     char *target_buf_ptr;
4952     struct usbdevfs_urb host_urb;
4953 };
4954 
4955 static GHashTable *usbdevfs_urb_hashtable(void)
4956 {
4957     static GHashTable *urb_hashtable;
4958 
4959     if (!urb_hashtable) {
4960         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4961     }
4962     return urb_hashtable;
4963 }
4964 
4965 static void urb_hashtable_insert(struct live_urb *urb)
4966 {
4967     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4968     g_hash_table_insert(urb_hashtable, urb, urb);
4969 }
4970 
4971 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4972 {
4973     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4974     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4975 }
4976 
4977 static void urb_hashtable_remove(struct live_urb *urb)
4978 {
4979     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4980     g_hash_table_remove(urb_hashtable, urb);
4981 }
4982 
4983 static abi_long
4984 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4985                           int fd, int cmd, abi_long arg)
4986 {
4987     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4988     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4989     struct live_urb *lurb;
4990     void *argptr;
4991     uint64_t hurb;
4992     int target_size;
4993     uintptr_t target_urb_adr;
4994     abi_long ret;
4995 
4996     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4997 
4998     memset(buf_temp, 0, sizeof(uint64_t));
4999     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5000     if (is_error(ret)) {
5001         return ret;
5002     }
5003 
5004     memcpy(&hurb, buf_temp, sizeof(uint64_t));
5005     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
5006     if (!lurb->target_urb_adr) {
5007         return -TARGET_EFAULT;
5008     }
5009     urb_hashtable_remove(lurb);
5010     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5011         lurb->host_urb.buffer_length);
5012     lurb->target_buf_ptr = NULL;
5013 
5014     /* restore the guest buffer pointer */
5015     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5016 
5017     /* update the guest urb struct */
5018     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5019     if (!argptr) {
5020         g_free(lurb);
5021         return -TARGET_EFAULT;
5022     }
5023     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5024     unlock_user(argptr, lurb->target_urb_adr, target_size);
5025 
5026     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5027     /* write back the urb handle */
5028     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5029     if (!argptr) {
5030         g_free(lurb);
5031         return -TARGET_EFAULT;
5032     }
5033 
5034     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5035     target_urb_adr = lurb->target_urb_adr;
5036     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5037     unlock_user(argptr, arg, target_size);
5038 
5039     g_free(lurb);
5040     return ret;
5041 }
5042 
5043 static abi_long
5044 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5045                              uint8_t *buf_temp __attribute__((unused)),
5046                              int fd, int cmd, abi_long arg)
5047 {
5048     struct live_urb *lurb;
5049 
5050     /* map target address back to host URB with metadata. */
5051     lurb = urb_hashtable_lookup(arg);
5052     if (!lurb) {
5053         return -TARGET_EFAULT;
5054     }
5055     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5056 }
5057 
5058 static abi_long
5059 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5060                             int fd, int cmd, abi_long arg)
5061 {
5062     const argtype *arg_type = ie->arg_type;
5063     int target_size;
5064     abi_long ret;
5065     void *argptr;
5066     int rw_dir;
5067     struct live_urb *lurb;
5068 
5069     /*
5070      * each submitted URB needs to map to a unique ID for the
5071      * kernel, and that unique ID needs to be a pointer to
5072      * host memory.  hence, we need to malloc for each URB.
5073      * isochronous transfers have a variable length struct.
5074      */
5075     arg_type++;
5076     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5077 
5078     /* construct host copy of urb and metadata */
5079     lurb = g_try_malloc0(sizeof(struct live_urb));
5080     if (!lurb) {
5081         return -TARGET_ENOMEM;
5082     }
5083 
5084     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5085     if (!argptr) {
5086         g_free(lurb);
5087         return -TARGET_EFAULT;
5088     }
5089     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5090     unlock_user(argptr, arg, 0);
5091 
5092     lurb->target_urb_adr = arg;
5093     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5094 
5095     /* buffer space used depends on endpoint type so lock the entire buffer */
5096     /* control type urbs should check the buffer contents for true direction */
5097     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5098     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5099         lurb->host_urb.buffer_length, 1);
5100     if (lurb->target_buf_ptr == NULL) {
5101         g_free(lurb);
5102         return -TARGET_EFAULT;
5103     }
5104 
5105     /* update buffer pointer in host copy */
5106     lurb->host_urb.buffer = lurb->target_buf_ptr;
5107 
5108     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5109     if (is_error(ret)) {
5110         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5111         g_free(lurb);
5112     } else {
5113         urb_hashtable_insert(lurb);
5114     }
5115 
5116     return ret;
5117 }
5118 #endif /* CONFIG_USBFS */
5119 
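/*
 * Device-mapper ioctls share a single struct dm_ioctl header followed by
 * a command specific, variable sized payload, so the payload is converted
 * by hand per command in both directions around the host ioctl.
 */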
5120 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5121                             int cmd, abi_long arg)
5122 {
5123     void *argptr;
5124     struct dm_ioctl *host_dm;
5125     abi_long guest_data;
5126     uint32_t guest_data_size;
5127     int target_size;
5128     const argtype *arg_type = ie->arg_type;
5129     abi_long ret;
5130     void *big_buf = NULL;
5131     char *host_data;
5132 
5133     arg_type++;
5134     target_size = thunk_type_size(arg_type, 0);
5135     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5136     if (!argptr) {
5137         ret = -TARGET_EFAULT;
5138         goto out;
5139     }
5140     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5141     unlock_user(argptr, arg, 0);
5142 
5143     /* buf_temp is too small, so fetch things into a bigger buffer */
5144     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5145     memcpy(big_buf, buf_temp, target_size);
5146     buf_temp = big_buf;
5147     host_dm = big_buf;
5148 
5149     guest_data = arg + host_dm->data_start;
5150     if ((guest_data - arg) < 0) {
5151         ret = -TARGET_EINVAL;
5152         goto out;
5153     }
5154     guest_data_size = host_dm->data_size - host_dm->data_start;
5155     host_data = (char*)host_dm + host_dm->data_start;
5156 
5157     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5158     if (!argptr) {
5159         ret = -TARGET_EFAULT;
5160         goto out;
5161     }
5162 
5163     switch (ie->host_cmd) {
5164     case DM_REMOVE_ALL:
5165     case DM_LIST_DEVICES:
5166     case DM_DEV_CREATE:
5167     case DM_DEV_REMOVE:
5168     case DM_DEV_SUSPEND:
5169     case DM_DEV_STATUS:
5170     case DM_DEV_WAIT:
5171     case DM_TABLE_STATUS:
5172     case DM_TABLE_CLEAR:
5173     case DM_TABLE_DEPS:
5174     case DM_LIST_VERSIONS:
5175         /* no input data */
5176         break;
5177     case DM_DEV_RENAME:
5178     case DM_DEV_SET_GEOMETRY:
5179         /* data contains only strings */
5180         memcpy(host_data, argptr, guest_data_size);
5181         break;
5182     case DM_TARGET_MSG:
5183         memcpy(host_data, argptr, guest_data_size);
5184         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5185         break;
5186     case DM_TABLE_LOAD:
5187     {
5188         void *gspec = argptr;
5189         void *cur_data = host_data;
5190         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5191         int spec_size = thunk_type_size(arg_type, 0);
5192         int i;
5193 
5194         for (i = 0; i < host_dm->target_count; i++) {
5195             struct dm_target_spec *spec = cur_data;
5196             uint32_t next;
5197             int slen;
5198 
5199             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5200             slen = strlen((char*)gspec + spec_size) + 1;
5201             next = spec->next;
5202             spec->next = sizeof(*spec) + slen;
5203             strcpy((char*)&spec[1], gspec + spec_size);
5204             gspec += next;
5205             cur_data += spec->next;
5206         }
5207         break;
5208     }
5209     default:
5210         ret = -TARGET_EINVAL;
5211         unlock_user(argptr, guest_data, 0);
5212         goto out;
5213     }
5214     unlock_user(argptr, guest_data, 0);
5215 
5216     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5217     if (!is_error(ret)) {
5218         guest_data = arg + host_dm->data_start;
5219         guest_data_size = host_dm->data_size - host_dm->data_start;
5220         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5221         switch (ie->host_cmd) {
5222         case DM_REMOVE_ALL:
5223         case DM_DEV_CREATE:
5224         case DM_DEV_REMOVE:
5225         case DM_DEV_RENAME:
5226         case DM_DEV_SUSPEND:
5227         case DM_DEV_STATUS:
5228         case DM_TABLE_LOAD:
5229         case DM_TABLE_CLEAR:
5230         case DM_TARGET_MSG:
5231         case DM_DEV_SET_GEOMETRY:
5232             /* no return data */
5233             break;
5234         case DM_LIST_DEVICES:
5235         {
5236             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5237             uint32_t remaining_data = guest_data_size;
5238             void *cur_data = argptr;
5239             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5240             int nl_size = 12; /* can't use thunk_size due to alignment */
5241 
5242             while (1) {
5243                 uint32_t next = nl->next;
5244                 if (next) {
5245                     nl->next = nl_size + (strlen(nl->name) + 1);
5246                 }
5247                 if (remaining_data < nl->next) {
5248                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5249                     break;
5250                 }
5251                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5252                 strcpy(cur_data + nl_size, nl->name);
5253                 cur_data += nl->next;
5254                 remaining_data -= nl->next;
5255                 if (!next) {
5256                     break;
5257                 }
5258                 nl = (void*)nl + next;
5259             }
5260             break;
5261         }
5262         case DM_DEV_WAIT:
5263         case DM_TABLE_STATUS:
5264         {
5265             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5266             void *cur_data = argptr;
5267             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5268             int spec_size = thunk_type_size(arg_type, 0);
5269             int i;
5270 
5271             for (i = 0; i < host_dm->target_count; i++) {
5272                 uint32_t next = spec->next;
5273                 int slen = strlen((char*)&spec[1]) + 1;
5274                 spec->next = (cur_data - argptr) + spec_size + slen;
5275                 if (guest_data_size < spec->next) {
5276                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5277                     break;
5278                 }
5279                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5280                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5281                 cur_data = argptr + spec->next;
5282                 spec = (void*)host_dm + host_dm->data_start + next;
5283             }
5284             break;
5285         }
5286         case DM_TABLE_DEPS:
5287         {
5288             void *hdata = (void*)host_dm + host_dm->data_start;
5289             int count = *(uint32_t*)hdata;
5290             uint64_t *hdev = hdata + 8;
5291             uint64_t *gdev = argptr + 8;
5292             int i;
5293 
5294             *(uint32_t*)argptr = tswap32(count);
5295             for (i = 0; i < count; i++) {
5296                 *gdev = tswap64(*hdev);
5297                 gdev++;
5298                 hdev++;
5299             }
5300             break;
5301         }
5302         case DM_LIST_VERSIONS:
5303         {
5304             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5305             uint32_t remaining_data = guest_data_size;
5306             void *cur_data = argptr;
5307             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5308             int vers_size = thunk_type_size(arg_type, 0);
5309 
5310             while (1) {
5311                 uint32_t next = vers->next;
5312                 if (next) {
5313                     vers->next = vers_size + (strlen(vers->name) + 1);
5314                 }
5315                 if (remaining_data < vers->next) {
5316                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5317                     break;
5318                 }
5319                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5320                 strcpy(cur_data + vers_size, vers->name);
5321                 cur_data += vers->next;
5322                 remaining_data -= vers->next;
5323                 if (!next) {
5324                     break;
5325                 }
5326                 vers = (void*)vers + next;
5327             }
5328             break;
5329         }
5330         default:
5331             unlock_user(argptr, guest_data, 0);
5332             ret = -TARGET_EINVAL;
5333             goto out;
5334         }
5335         unlock_user(argptr, guest_data, guest_data_size);
5336 
5337         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5338         if (!argptr) {
5339             ret = -TARGET_EFAULT;
5340             goto out;
5341         }
5342         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5343         unlock_user(argptr, arg, target_size);
5344     }
5345 out:
5346     g_free(big_buf);
5347     return ret;
5348 }
5349 
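/*
 * BLKPG: struct blkpg_ioctl_arg carries a pointer to a struct
 * blkpg_partition payload, which is converted into a local host copy
 * before the ioctl is issued.
 */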
5350 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5351                                int cmd, abi_long arg)
5352 {
5353     void *argptr;
5354     int target_size;
5355     const argtype *arg_type = ie->arg_type;
5356     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5357     abi_long ret;
5358 
5359     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5360     struct blkpg_partition host_part;
5361 
5362     /* Read and convert blkpg */
5363     arg_type++;
5364     target_size = thunk_type_size(arg_type, 0);
5365     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5366     if (!argptr) {
5367         ret = -TARGET_EFAULT;
5368         goto out;
5369     }
5370     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5371     unlock_user(argptr, arg, 0);
5372 
5373     switch (host_blkpg->op) {
5374     case BLKPG_ADD_PARTITION:
5375     case BLKPG_DEL_PARTITION:
5376         /* payload is struct blkpg_partition */
5377         break;
5378     default:
5379         /* Unknown opcode */
5380         ret = -TARGET_EINVAL;
5381         goto out;
5382     }
5383 
5384     /* Read and convert blkpg->data */
5385     arg = (abi_long)(uintptr_t)host_blkpg->data;
5386     target_size = thunk_type_size(part_arg_type, 0);
5387     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5388     if (!argptr) {
5389         ret = -TARGET_EFAULT;
5390         goto out;
5391     }
5392     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5393     unlock_user(argptr, arg, 0);
5394 
5395     /* Swizzle the data pointer to our local copy and call! */
5396     host_blkpg->data = &host_part;
5397     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5398 
5399 out:
5400     return ret;
5401 }
5402 
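/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains the rt_dev string pointer,
 * which the generic thunk code cannot follow, so the structure is
 * converted field by field here and the device name is locked separately.
 */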
5403 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5404                                 int fd, int cmd, abi_long arg)
5405 {
5406     const argtype *arg_type = ie->arg_type;
5407     const StructEntry *se;
5408     const argtype *field_types;
5409     const int *dst_offsets, *src_offsets;
5410     int target_size;
5411     void *argptr;
5412     abi_ulong *target_rt_dev_ptr = NULL;
5413     unsigned long *host_rt_dev_ptr = NULL;
5414     abi_long ret;
5415     int i;
5416 
5417     assert(ie->access == IOC_W);
5418     assert(*arg_type == TYPE_PTR);
5419     arg_type++;
5420     assert(*arg_type == TYPE_STRUCT);
5421     target_size = thunk_type_size(arg_type, 0);
5422     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5423     if (!argptr) {
5424         return -TARGET_EFAULT;
5425     }
5426     arg_type++;
5427     assert(*arg_type == (int)STRUCT_rtentry);
5428     se = struct_entries + *arg_type++;
5429     assert(se->convert[0] == NULL);
5430     /* convert struct here to be able to catch rt_dev string */
5431     field_types = se->field_types;
5432     dst_offsets = se->field_offsets[THUNK_HOST];
5433     src_offsets = se->field_offsets[THUNK_TARGET];
5434     for (i = 0; i < se->nb_fields; i++) {
5435         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5436             assert(*field_types == TYPE_PTRVOID);
5437             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5438             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5439             if (*target_rt_dev_ptr != 0) {
5440                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5441                                                   tswapal(*target_rt_dev_ptr));
5442                 if (!*host_rt_dev_ptr) {
5443                     unlock_user(argptr, arg, 0);
5444                     return -TARGET_EFAULT;
5445                 }
5446             } else {
5447                 *host_rt_dev_ptr = 0;
5448             }
5449             field_types++;
5450             continue;
5451         }
5452         field_types = thunk_convert(buf_temp + dst_offsets[i],
5453                                     argptr + src_offsets[i],
5454                                     field_types, THUNK_HOST);
5455     }
5456     unlock_user(argptr, arg, 0);
5457 
5458     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5459 
5460     assert(host_rt_dev_ptr != NULL);
5461     assert(target_rt_dev_ptr != NULL);
5462     if (*host_rt_dev_ptr != 0) {
5463         unlock_user((void *)*host_rt_dev_ptr,
5464                     *target_rt_dev_ptr, 0);
5465     }
5466     return ret;
5467 }
5468 
5469 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5470                                      int fd, int cmd, abi_long arg)
5471 {
5472     int sig = target_to_host_signal(arg);
5473     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5474 }
5475 
5476 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5477                                     int fd, int cmd, abi_long arg)
5478 {
5479     struct timeval tv;
5480     abi_long ret;
5481 
5482     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5483     if (is_error(ret)) {
5484         return ret;
5485     }
5486 
5487     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5488         if (copy_to_user_timeval(arg, &tv)) {
5489             return -TARGET_EFAULT;
5490         }
5491     } else {
5492         if (copy_to_user_timeval64(arg, &tv)) {
5493             return -TARGET_EFAULT;
5494         }
5495     }
5496 
5497     return ret;
5498 }
5499 
5500 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5501                                       int fd, int cmd, abi_long arg)
5502 {
5503     struct timespec ts;
5504     abi_long ret;
5505 
5506     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5507     if (is_error(ret)) {
5508         return ret;
5509     }
5510 
5511     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5512         if (host_to_target_timespec(arg, &ts)) {
5513             return -TARGET_EFAULT;
5514         }
5515     } else {
5516         if (host_to_target_timespec64(arg, &ts)) {
5517             return -TARGET_EFAULT;
5518         }
5519     }
5520 
5521     return ret;
5522 }
5523 
5524 #ifdef TIOCGPTPEER
5525 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5526                                      int fd, int cmd, abi_long arg)
5527 {
5528     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5529     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5530 }
5531 #endif
5532 
5533 #ifdef HAVE_DRM_H
5534 
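/*
 * DRM_IOCTL_VERSION returns three variable length strings; the guest
 * buffers for name, date and desc are locked for the duration of the
 * ioctl and the host struct drm_version points straight at them.
 */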
5535 static void unlock_drm_version(struct drm_version *host_ver,
5536                                struct target_drm_version *target_ver,
5537                                bool copy)
5538 {
5539     unlock_user(host_ver->name, target_ver->name,
5540                                 copy ? host_ver->name_len : 0);
5541     unlock_user(host_ver->date, target_ver->date,
5542                                 copy ? host_ver->date_len : 0);
5543     unlock_user(host_ver->desc, target_ver->desc,
5544                                 copy ? host_ver->desc_len : 0);
5545 }
5546 
5547 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5548                                           struct target_drm_version *target_ver)
5549 {
5550     memset(host_ver, 0, sizeof(*host_ver));
5551 
5552     __get_user(host_ver->name_len, &target_ver->name_len);
5553     if (host_ver->name_len) {
5554         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5555                                    target_ver->name_len, 0);
5556         if (!host_ver->name) {
5557             return -EFAULT;
5558         }
5559     }
5560 
5561     __get_user(host_ver->date_len, &target_ver->date_len);
5562     if (host_ver->date_len) {
5563         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5564                                    target_ver->date_len, 0);
5565         if (!host_ver->date) {
5566             goto err;
5567         }
5568     }
5569 
5570     __get_user(host_ver->desc_len, &target_ver->desc_len);
5571     if (host_ver->desc_len) {
5572         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5573                                    target_ver->desc_len, 0);
5574         if (!host_ver->desc) {
5575             goto err;
5576         }
5577     }
5578 
5579     return 0;
5580 err:
5581     unlock_drm_version(host_ver, target_ver, false);
5582     return -EFAULT;
5583 }
5584 
5585 static inline void host_to_target_drmversion(
5586                                           struct target_drm_version *target_ver,
5587                                           struct drm_version *host_ver)
5588 {
5589     __put_user(host_ver->version_major, &target_ver->version_major);
5590     __put_user(host_ver->version_minor, &target_ver->version_minor);
5591     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5592     __put_user(host_ver->name_len, &target_ver->name_len);
5593     __put_user(host_ver->date_len, &target_ver->date_len);
5594     __put_user(host_ver->desc_len, &target_ver->desc_len);
5595     unlock_drm_version(host_ver, target_ver, true);
5596 }
5597 
5598 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5599                              int fd, int cmd, abi_long arg)
5600 {
5601     struct drm_version *ver;
5602     struct target_drm_version *target_ver;
5603     abi_long ret;
5604 
5605     switch (ie->host_cmd) {
5606     case DRM_IOCTL_VERSION:
5607         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5608             return -TARGET_EFAULT;
5609         }
5610         ver = (struct drm_version *)buf_temp;
5611         ret = target_to_host_drmversion(ver, target_ver);
5612         if (!is_error(ret)) {
5613             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5614             if (is_error(ret)) {
5615                 unlock_drm_version(ver, target_ver, false);
5616             } else {
5617                 host_to_target_drmversion(target_ver, ver);
5618             }
5619         }
5620         unlock_user_struct(target_ver, arg, 0);
5621         return ret;
5622     }
5623     return -TARGET_ENOSYS;
5624 }
5625 
5626 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5627                                            struct drm_i915_getparam *gparam,
5628                                            int fd, abi_long arg)
5629 {
5630     abi_long ret;
5631     int value;
5632     struct target_drm_i915_getparam *target_gparam;
5633 
5634     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5635         return -TARGET_EFAULT;
5636     }
5637 
5638     __get_user(gparam->param, &target_gparam->param);
5639     gparam->value = &value;
5640     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5641     put_user_s32(value, target_gparam->value);
5642 
5643     unlock_user_struct(target_gparam, arg, 0);
5644     return ret;
5645 }
5646 
5647 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5648                                   int fd, int cmd, abi_long arg)
5649 {
5650     switch (ie->host_cmd) {
5651     case DRM_IOCTL_I915_GETPARAM:
5652         return do_ioctl_drm_i915_getparam(ie,
5653                                           (struct drm_i915_getparam *)buf_temp,
5654                                           fd, arg);
5655     default:
5656         return -TARGET_ENOSYS;
5657     }
5658 }
5659 
5660 #endif
5661 
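/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable length
 * array of MAC addresses, so the flexible part is copied by hand after
 * converting the fixed header fields.
 */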
5662 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5663                                         int fd, int cmd, abi_long arg)
5664 {
5665     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5666     struct tun_filter *target_filter;
5667     char *target_addr;
5668 
5669     assert(ie->access == IOC_W);
5670 
5671     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5672     if (!target_filter) {
5673         return -TARGET_EFAULT;
5674     }
5675     filter->flags = tswap16(target_filter->flags);
5676     filter->count = tswap16(target_filter->count);
5677     unlock_user(target_filter, arg, 0);
5678 
5679     if (filter->count) {
5680         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5681             MAX_STRUCT_SIZE) {
5682             return -TARGET_EFAULT;
5683         }
5684 
5685         target_addr = lock_user(VERIFY_READ,
5686                                 arg + offsetof(struct tun_filter, addr),
5687                                 filter->count * ETH_ALEN, 1);
5688         if (!target_addr) {
5689             return -TARGET_EFAULT;
5690         }
5691         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5692         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5693     }
5694 
5695     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5696 }
5697 
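/*
 * The ioctl table is generated from ioctls.h: IOCTL() entries are handled
 * generically via their argtype description, IOCTL_SPECIAL() entries
 * install a custom do_fn handler, and IOCTL_IGNORE() entries are
 * recognized by name but return -TARGET_ENOSYS.
 */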
5698 IOCTLEntry ioctl_entries[] = {
5699 #define IOCTL(cmd, access, ...) \
5700     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5701 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5702     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5703 #define IOCTL_IGNORE(cmd) \
5704     { TARGET_ ## cmd, 0, #cmd },
5705 #include "ioctls.h"
5706     { 0, 0, },
5707 };
5708 
5709 /* ??? Implement proper locking for ioctls.  */
5710 /* do_ioctl() must return target values and target errnos. */
5711 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5712 {
5713     const IOCTLEntry *ie;
5714     const argtype *arg_type;
5715     abi_long ret;
5716     uint8_t buf_temp[MAX_STRUCT_SIZE];
5717     int target_size;
5718     void *argptr;
5719 
5720     ie = ioctl_entries;
5721     for (;;) {
5722         if (ie->target_cmd == 0) {
5723             qemu_log_mask(
5724                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5725             return -TARGET_ENOSYS;
5726         }
5727         if (ie->target_cmd == cmd)
5728             break;
5729         ie++;
5730     }
5731     arg_type = ie->arg_type;
5732     if (ie->do_ioctl) {
5733         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5734     } else if (!ie->host_cmd) {
5735         /* Some architectures define BSD ioctls in their headers
5736            that are not implemented in Linux.  */
5737         return -TARGET_ENOSYS;
5738     }
5739 
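    /*
     * Generic conversion driven by the argtype description from ioctls.h:
     * scalar arguments are passed through unchanged, while TYPE_PTR
     * arguments are converted according to the IOC_R/IOC_W/IOC_RW access
     * mode of the entry.
     */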
5740     switch (arg_type[0]) {
5741     case TYPE_NULL:
5742         /* no argument */
5743         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5744         break;
5745     case TYPE_PTRVOID:
5746     case TYPE_INT:
5747     case TYPE_LONG:
5748     case TYPE_ULONG:
5749         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5750         break;
5751     case TYPE_PTR:
5752         arg_type++;
5753         target_size = thunk_type_size(arg_type, 0);
5754         switch (ie->access) {
5755         case IOC_R:
5756             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5757             if (!is_error(ret)) {
5758                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5759                 if (!argptr)
5760                     return -TARGET_EFAULT;
5761                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5762                 unlock_user(argptr, arg, target_size);
5763             }
5764             break;
5765         case IOC_W:
5766             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5767             if (!argptr)
5768                 return -TARGET_EFAULT;
5769             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5770             unlock_user(argptr, arg, 0);
5771             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5772             break;
5773         default:
5774         case IOC_RW:
5775             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5776             if (!argptr)
5777                 return -TARGET_EFAULT;
5778             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5779             unlock_user(argptr, arg, 0);
5780             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5781             if (!is_error(ret)) {
5782                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5783                 if (!argptr)
5784                     return -TARGET_EFAULT;
5785                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5786                 unlock_user(argptr, arg, target_size);
5787             }
5788             break;
5789         }
5790         break;
5791     default:
5792         qemu_log_mask(LOG_UNIMP,
5793                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5794                       (long)cmd, arg_type[0]);
5795         ret = -TARGET_ENOSYS;
5796         break;
5797     }
5798     return ret;
5799 }
5800 
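/*
 * Terminal flag translation tables: each entry maps a (mask, value) pair
 * in the target's termios bit layout to the corresponding host pair, for
 * use by the bitmask conversion helpers in the termios converters below.
 */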
5801 static const bitmask_transtbl iflag_tbl[] = {
5802         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5803         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5804         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5805         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5806         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5807         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5808         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5809         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5810         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5811         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5812         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5813         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5814         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5815         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5816         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5817         { 0, 0, 0, 0 }
5818 };
5819 
5820 static const bitmask_transtbl oflag_tbl[] = {
5821         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5822         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5823         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5824         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5825         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5826         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5827         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5828         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5829         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5830         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5831         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5832         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5833         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5834         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5835         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5836         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5837         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5838         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5839         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5840         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5841         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5842         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5843         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5844         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5845         { 0, 0, 0, 0 }
5846 };
5847 
5848 static const bitmask_transtbl cflag_tbl[] = {
5849         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5850         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5851         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5852         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5853         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5854         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5855         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5856         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5857         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5858         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5859         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5860         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5861         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5862         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5863         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5864         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5865         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5866         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5867         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5868         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5869         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5870         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5871         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5872         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5873         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5874         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5875         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5876         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5877         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5878         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5879         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5880         { 0, 0, 0, 0 }
5881 };
5882 
5883 static const bitmask_transtbl lflag_tbl[] = {
5884   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5885   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5886   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5887   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5888   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5889   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5890   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5891   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5892   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5893   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5894   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5895   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5896   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5897   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5898   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5899   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5900   { 0, 0, 0, 0 }
5901 };
5902 
5903 static void target_to_host_termios (void *dst, const void *src)
5904 {
5905     struct host_termios *host = dst;
5906     const struct target_termios *target = src;
5907 
5908     host->c_iflag =
5909         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5910     host->c_oflag =
5911         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5912     host->c_cflag =
5913         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5914     host->c_lflag =
5915         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5916     host->c_line = target->c_line;
5917 
5918     memset(host->c_cc, 0, sizeof(host->c_cc));
5919     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5920     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5921     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5922     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5923     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5924     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5925     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5926     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5927     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5928     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5929     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5930     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5931     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5932     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5933     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5934     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5935     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5936 }
5937 
5938 static void host_to_target_termios (void *dst, const void *src)
5939 {
5940     struct target_termios *target = dst;
5941     const struct host_termios *host = src;
5942 
5943     target->c_iflag =
5944         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5945     target->c_oflag =
5946         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5947     target->c_cflag =
5948         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5949     target->c_lflag =
5950         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5951     target->c_line = host->c_line;
5952 
5953     memset(target->c_cc, 0, sizeof(target->c_cc));
5954     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5955     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5956     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5957     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5958     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5959     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5960     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5961     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5962     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5963     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5964     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5965     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5966     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5967     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5968     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5969     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5970     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5971 }
5972 
5973 static const StructEntry struct_termios_def = {
5974     .convert = { host_to_target_termios, target_to_host_termios },
5975     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5976     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5977     .print = print_termios,
5978 };
5979 
5980 static const bitmask_transtbl mmap_flags_tbl[] = {
5981     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5982     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5983     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5984     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5985       MAP_ANONYMOUS, MAP_ANONYMOUS },
5986     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5987       MAP_GROWSDOWN, MAP_GROWSDOWN },
5988     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5989       MAP_DENYWRITE, MAP_DENYWRITE },
5990     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5991       MAP_EXECUTABLE, MAP_EXECUTABLE },
5992     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5993     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5994       MAP_NORESERVE, MAP_NORESERVE },
5995     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5996     /* MAP_STACK had been ignored by the kernel for quite some time.
5997        Recognize it for the target insofar as we do not want to pass
5998        it through to the host.  */
5999     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
6000     { 0, 0, 0, 0 }
6001 };
6002 
6003 /*
6004  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
6005  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
6006  */
6007 #if defined(TARGET_I386)
6008 
6009 /* NOTE: there is really one LDT for all the threads */
6010 static uint8_t *ldt_table;
6011 
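/*
 * read_ldt() copies the emulated LDT back out to the guest; write_ldt()
 * decodes a target_modify_ldt_ldt_s descriptor and installs the resulting
 * two 32-bit words into the LDT, using the same descriptor encoding as the
 * Linux kernel.
 */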
6012 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6013 {
6014     int size;
6015     void *p;
6016 
6017     if (!ldt_table)
6018         return 0;
6019     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6020     if (size > bytecount)
6021         size = bytecount;
6022     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6023     if (!p)
6024         return -TARGET_EFAULT;
6025     /* ??? Should this be byteswapped?  */
6026     memcpy(p, ldt_table, size);
6027     unlock_user(p, ptr, size);
6028     return size;
6029 }
6030 
6031 /* XXX: add locking support */
6032 static abi_long write_ldt(CPUX86State *env,
6033                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6034 {
6035     struct target_modify_ldt_ldt_s ldt_info;
6036     struct target_modify_ldt_ldt_s *target_ldt_info;
6037     int seg_32bit, contents, read_exec_only, limit_in_pages;
6038     int seg_not_present, useable, lm;
6039     uint32_t *lp, entry_1, entry_2;
6040 
6041     if (bytecount != sizeof(ldt_info))
6042         return -TARGET_EINVAL;
6043     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6044         return -TARGET_EFAULT;
6045     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6046     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6047     ldt_info.limit = tswap32(target_ldt_info->limit);
6048     ldt_info.flags = tswap32(target_ldt_info->flags);
6049     unlock_user_struct(target_ldt_info, ptr, 0);
6050 
6051     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6052         return -TARGET_EINVAL;
6053     seg_32bit = ldt_info.flags & 1;
6054     contents = (ldt_info.flags >> 1) & 3;
6055     read_exec_only = (ldt_info.flags >> 3) & 1;
6056     limit_in_pages = (ldt_info.flags >> 4) & 1;
6057     seg_not_present = (ldt_info.flags >> 5) & 1;
6058     useable = (ldt_info.flags >> 6) & 1;
6059 #ifdef TARGET_ABI32
6060     lm = 0;
6061 #else
6062     lm = (ldt_info.flags >> 7) & 1;
6063 #endif
6064     if (contents == 3) {
6065         if (oldmode)
6066             return -TARGET_EINVAL;
6067         if (seg_not_present == 0)
6068             return -TARGET_EINVAL;
6069     }
6070     /* allocate the LDT */
6071     if (!ldt_table) {
6072         env->ldt.base = target_mmap(0,
6073                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6074                                     PROT_READ|PROT_WRITE,
6075                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6076         if (env->ldt.base == -1)
6077             return -TARGET_ENOMEM;
6078         memset(g2h_untagged(env->ldt.base), 0,
6079                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6080         env->ldt.limit = 0xffff;
6081         ldt_table = g2h_untagged(env->ldt.base);
6082     }
6083 
6084     /* NOTE: same code as Linux kernel */
6085     /* Allow LDTs to be cleared by the user. */
6086     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6087         if (oldmode ||
6088             (contents == 0             &&
6089              read_exec_only == 1       &&
6090              seg_32bit == 0            &&
6091              limit_in_pages == 0       &&
6092              seg_not_present == 1      &&
6093              useable == 0 )) {
6094             entry_1 = 0;
6095             entry_2 = 0;
6096             goto install;
6097         }
6098     }
6099 
6100     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6101         (ldt_info.limit & 0x0ffff);
6102     entry_2 = (ldt_info.base_addr & 0xff000000) |
6103         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6104         (ldt_info.limit & 0xf0000) |
6105         ((read_exec_only ^ 1) << 9) |
6106         (contents << 10) |
6107         ((seg_not_present ^ 1) << 15) |
6108         (seg_32bit << 22) |
6109         (limit_in_pages << 23) |
6110         (lm << 21) |
6111         0x7000;
6112     if (!oldmode)
6113         entry_2 |= (useable << 20);
6114 
6115     /* Install the new entry ...  */
6116 install:
6117     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6118     lp[0] = tswap32(entry_1);
6119     lp[1] = tswap32(entry_2);
6120     return 0;
6121 }
6122 
6123 /* specific and weird i386 syscalls */
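/*
 * modify_ldt() dispatch. A guest typically reaches this via
 * syscall(SYS_modify_ldt, func, &desc, sizeof(desc)) (the variable names are
 * only illustrative). func 0 reads the LDT, 1 writes an entry in the legacy
 * format, and 0x11 writes in the current format; anything else is rejected
 * with ENOSYS below.
 */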
6124 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6125                               unsigned long bytecount)
6126 {
6127     abi_long ret;
6128 
6129     switch (func) {
6130     case 0:
6131         ret = read_ldt(ptr, bytecount);
6132         break;
6133     case 1:
6134         ret = write_ldt(env, ptr, bytecount, 1);
6135         break;
6136     case 0x11:
6137         ret = write_ldt(env, ptr, bytecount, 0);
6138         break;
6139     default:
6140         ret = -TARGET_ENOSYS;
6141         break;
6142     }
6143     return ret;
6144 }
6145 
6146 #if defined(TARGET_ABI32)
6147 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6148 {
6149     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6150     struct target_modify_ldt_ldt_s ldt_info;
6151     struct target_modify_ldt_ldt_s *target_ldt_info;
6152     int seg_32bit, contents, read_exec_only, limit_in_pages;
6153     int seg_not_present, useable, lm;
6154     uint32_t *lp, entry_1, entry_2;
6155     int i;
6156 
6157     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6158     if (!target_ldt_info)
6159         return -TARGET_EFAULT;
6160     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6161     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6162     ldt_info.limit = tswap32(target_ldt_info->limit);
6163     ldt_info.flags = tswap32(target_ldt_info->flags);
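    /*
     * As with the kernel's set_thread_area(), an entry_number of -1 asks us
     * to pick a free TLS slot in the GDT; the index chosen below is written
     * back into the guest structure.
     */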
6164     if (ldt_info.entry_number == -1) {
6165         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6166             if (gdt_table[i] == 0) {
6167                 ldt_info.entry_number = i;
6168                 target_ldt_info->entry_number = tswap32(i);
6169                 break;
6170             }
6171         }
6172     }
6173     unlock_user_struct(target_ldt_info, ptr, 1);
6174 
6175     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6176         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6177            return -TARGET_EINVAL;
6178     seg_32bit = ldt_info.flags & 1;
6179     contents = (ldt_info.flags >> 1) & 3;
6180     read_exec_only = (ldt_info.flags >> 3) & 1;
6181     limit_in_pages = (ldt_info.flags >> 4) & 1;
6182     seg_not_present = (ldt_info.flags >> 5) & 1;
6183     useable = (ldt_info.flags >> 6) & 1;
6184 #ifdef TARGET_ABI32
6185     lm = 0;
6186 #else
6187     lm = (ldt_info.flags >> 7) & 1;
6188 #endif
6189 
6190     if (contents == 3) {
6191         if (seg_not_present == 0)
6192             return -TARGET_EINVAL;
6193     }
6194 
6195     /* NOTE: same code as Linux kernel */
6196     /* Allow LDTs to be cleared by the user. */
6197     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6198         if ((contents == 0             &&
6199              read_exec_only == 1       &&
6200              seg_32bit == 0            &&
6201              limit_in_pages == 0       &&
6202              seg_not_present == 1      &&
6203              useable == 0 )) {
6204             entry_1 = 0;
6205             entry_2 = 0;
6206             goto install;
6207         }
6208     }
6209 
6210     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6211         (ldt_info.limit & 0x0ffff);
6212     entry_2 = (ldt_info.base_addr & 0xff000000) |
6213         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6214         (ldt_info.limit & 0xf0000) |
6215         ((read_exec_only ^ 1) << 9) |
6216         (contents << 10) |
6217         ((seg_not_present ^ 1) << 15) |
6218         (seg_32bit << 22) |
6219         (limit_in_pages << 23) |
6220         (useable << 20) |
6221         (lm << 21) |
6222         0x7000;
6223 
6224     /* Install the new entry ...  */
6225 install:
6226     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6227     lp[0] = tswap32(entry_1);
6228     lp[1] = tswap32(entry_2);
6229     return 0;
6230 }
6231 
6232 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6233 {
6234     struct target_modify_ldt_ldt_s *target_ldt_info;
6235     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6236     uint32_t base_addr, limit, flags;
6237     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6238     int seg_not_present, useable, lm;
6239     uint32_t *lp, entry_1, entry_2;
6240 
6241     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6242     if (!target_ldt_info)
6243         return -TARGET_EFAULT;
6244     idx = tswap32(target_ldt_info->entry_number);
6245     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6246         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6247         unlock_user_struct(target_ldt_info, ptr, 1);
6248         return -TARGET_EINVAL;
6249     }
6250     lp = (uint32_t *)(gdt_table + idx);
6251     entry_1 = tswap32(lp[0]);
6252     entry_2 = tswap32(lp[1]);
6253 
6254     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6255     contents = (entry_2 >> 10) & 3;
6256     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6257     seg_32bit = (entry_2 >> 22) & 1;
6258     limit_in_pages = (entry_2 >> 23) & 1;
6259     useable = (entry_2 >> 20) & 1;
6260 #ifdef TARGET_ABI32
6261     lm = 0;
6262 #else
6263     lm = (entry_2 >> 21) & 1;
6264 #endif
6265     flags = (seg_32bit << 0) | (contents << 1) |
6266         (read_exec_only << 3) | (limit_in_pages << 4) |
6267         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6268     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6269     base_addr = (entry_1 >> 16) |
6270         (entry_2 & 0xff000000) |
6271         ((entry_2 & 0xff) << 16);
6272     target_ldt_info->base_addr = tswapal(base_addr);
6273     target_ldt_info->limit = tswap32(limit);
6274     target_ldt_info->flags = tswap32(flags);
6275     unlock_user_struct(target_ldt_info, ptr, 1);
6276     return 0;
6277 }
6278 
6279 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6280 {
6281     return -TARGET_ENOSYS;
6282 }
6283 #else
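/*
 * 64-bit arch_prctl(): ARCH_SET_FS/ARCH_SET_GS load a null selector and then
 * set the segment base directly, while ARCH_GET_FS/ARCH_GET_GS copy the
 * current base back to the guest pointer in addr.
 */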
6284 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6285 {
6286     abi_long ret = 0;
6287     abi_ulong val;
6288     int idx;
6289 
6290     switch(code) {
6291     case TARGET_ARCH_SET_GS:
6292     case TARGET_ARCH_SET_FS:
6293         if (code == TARGET_ARCH_SET_GS)
6294             idx = R_GS;
6295         else
6296             idx = R_FS;
6297         cpu_x86_load_seg(env, idx, 0);
6298         env->segs[idx].base = addr;
6299         break;
6300     case TARGET_ARCH_GET_GS:
6301     case TARGET_ARCH_GET_FS:
6302         if (code == TARGET_ARCH_GET_GS)
6303             idx = R_GS;
6304         else
6305             idx = R_FS;
6306         val = env->segs[idx].base;
6307         if (put_user(val, addr, abi_ulong))
6308             ret = -TARGET_EFAULT;
6309         break;
6310     default:
6311         ret = -TARGET_EINVAL;
6312         break;
6313     }
6314     return ret;
6315 }
6316 #endif /* defined(TARGET_ABI32) */
6317 #endif /* defined(TARGET_I386) */
6318 
6319 /*
6320  * These constants are generic.  Supply any that are missing from the host.
6321  */
6322 #ifndef PR_SET_NAME
6323 # define PR_SET_NAME    15
6324 # define PR_GET_NAME    16
6325 #endif
6326 #ifndef PR_SET_FP_MODE
6327 # define PR_SET_FP_MODE 45
6328 # define PR_GET_FP_MODE 46
6329 # define PR_FP_MODE_FR   (1 << 0)
6330 # define PR_FP_MODE_FRE  (1 << 1)
6331 #endif
6332 #ifndef PR_SVE_SET_VL
6333 # define PR_SVE_SET_VL  50
6334 # define PR_SVE_GET_VL  51
6335 # define PR_SVE_VL_LEN_MASK  0xffff
6336 # define PR_SVE_VL_INHERIT   (1 << 17)
6337 #endif
6338 #ifndef PR_PAC_RESET_KEYS
6339 # define PR_PAC_RESET_KEYS  54
6340 # define PR_PAC_APIAKEY   (1 << 0)
6341 # define PR_PAC_APIBKEY   (1 << 1)
6342 # define PR_PAC_APDAKEY   (1 << 2)
6343 # define PR_PAC_APDBKEY   (1 << 3)
6344 # define PR_PAC_APGAKEY   (1 << 4)
6345 #endif
6346 #ifndef PR_SET_TAGGED_ADDR_CTRL
6347 # define PR_SET_TAGGED_ADDR_CTRL 55
6348 # define PR_GET_TAGGED_ADDR_CTRL 56
6349 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6350 #endif
6351 #ifndef PR_MTE_TCF_SHIFT
6352 # define PR_MTE_TCF_SHIFT       1
6353 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6354 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6355 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6356 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6357 # define PR_MTE_TAG_SHIFT       3
6358 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6359 #endif
6360 #ifndef PR_SET_IO_FLUSHER
6361 # define PR_SET_IO_FLUSHER 57
6362 # define PR_GET_IO_FLUSHER 58
6363 #endif
6364 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6365 # define PR_SET_SYSCALL_USER_DISPATCH 59
6366 #endif
6367 
6368 #include "target_prctl.h"
6369 
6370 static abi_long do_prctl_inval0(CPUArchState *env)
6371 {
6372     return -TARGET_EINVAL;
6373 }
6374 
6375 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6376 {
6377     return -TARGET_EINVAL;
6378 }
6379 
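/*
 * target_prctl.h, included above, may provide per-target implementations of
 * the do_prctl_* hooks; any hook it leaves undefined falls back to the
 * EINVAL stubs above.
 */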
6380 #ifndef do_prctl_get_fp_mode
6381 #define do_prctl_get_fp_mode do_prctl_inval0
6382 #endif
6383 #ifndef do_prctl_set_fp_mode
6384 #define do_prctl_set_fp_mode do_prctl_inval1
6385 #endif
6386 #ifndef do_prctl_get_vl
6387 #define do_prctl_get_vl do_prctl_inval0
6388 #endif
6389 #ifndef do_prctl_set_vl
6390 #define do_prctl_set_vl do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_reset_keys
6393 #define do_prctl_reset_keys do_prctl_inval1
6394 #endif
6395 #ifndef do_prctl_set_tagged_addr_ctrl
6396 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6397 #endif
6398 #ifndef do_prctl_get_tagged_addr_ctrl
6399 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6400 #endif
6401 #ifndef do_prctl_get_unalign
6402 #define do_prctl_get_unalign do_prctl_inval1
6403 #endif
6404 #ifndef do_prctl_set_unalign
6405 #define do_prctl_set_unalign do_prctl_inval1
6406 #endif
6407 
6408 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6409                          abi_long arg3, abi_long arg4, abi_long arg5)
6410 {
6411     abi_long ret;
6412 
6413     switch (option) {
6414     case PR_GET_PDEATHSIG:
6415         {
6416             int deathsig;
6417             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6418                                   arg3, arg4, arg5));
6419             if (!is_error(ret) &&
6420                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6421                 return -TARGET_EFAULT;
6422             }
6423             return ret;
6424         }
6425     case PR_SET_PDEATHSIG:
6426         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6427                                arg3, arg4, arg5));
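    /*
     * PR_GET_NAME/PR_SET_NAME operate on the kernel's 16-byte task comm
     * buffer (15 characters plus a trailing NUL), hence the fixed-size
     * lock_user() windows below.
     */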
6428     case PR_GET_NAME:
6429         {
6430             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6431             if (!name) {
6432                 return -TARGET_EFAULT;
6433             }
6434             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6435                                   arg3, arg4, arg5));
6436             unlock_user(name, arg2, 16);
6437             return ret;
6438         }
6439     case PR_SET_NAME:
6440         {
6441             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6442             if (!name) {
6443                 return -TARGET_EFAULT;
6444             }
6445             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6446                                   arg3, arg4, arg5));
6447             unlock_user(name, arg2, 0);
6448             return ret;
6449         }
6450     case PR_GET_FP_MODE:
6451         return do_prctl_get_fp_mode(env);
6452     case PR_SET_FP_MODE:
6453         return do_prctl_set_fp_mode(env, arg2);
6454     case PR_SVE_GET_VL:
6455         return do_prctl_get_vl(env);
6456     case PR_SVE_SET_VL:
6457         return do_prctl_set_vl(env, arg2);
6458     case PR_PAC_RESET_KEYS:
6459         if (arg3 || arg4 || arg5) {
6460             return -TARGET_EINVAL;
6461         }
6462         return do_prctl_reset_keys(env, arg2);
6463     case PR_SET_TAGGED_ADDR_CTRL:
6464         if (arg3 || arg4 || arg5) {
6465             return -TARGET_EINVAL;
6466         }
6467         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6468     case PR_GET_TAGGED_ADDR_CTRL:
6469         if (arg2 || arg3 || arg4 || arg5) {
6470             return -TARGET_EINVAL;
6471         }
6472         return do_prctl_get_tagged_addr_ctrl(env);
6473 
6474     case PR_GET_UNALIGN:
6475         return do_prctl_get_unalign(env, arg2);
6476     case PR_SET_UNALIGN:
6477         return do_prctl_set_unalign(env, arg2);
6478 
6479     case PR_CAP_AMBIENT:
6480     case PR_CAPBSET_READ:
6481     case PR_CAPBSET_DROP:
6482     case PR_GET_DUMPABLE:
6483     case PR_SET_DUMPABLE:
6484     case PR_GET_KEEPCAPS:
6485     case PR_SET_KEEPCAPS:
6486     case PR_GET_SECUREBITS:
6487     case PR_SET_SECUREBITS:
6488     case PR_GET_TIMING:
6489     case PR_SET_TIMING:
6490     case PR_GET_TIMERSLACK:
6491     case PR_SET_TIMERSLACK:
6492     case PR_MCE_KILL:
6493     case PR_MCE_KILL_GET:
6494     case PR_GET_NO_NEW_PRIVS:
6495     case PR_SET_NO_NEW_PRIVS:
6496     case PR_GET_IO_FLUSHER:
6497     case PR_SET_IO_FLUSHER:
6498         /* These options have no pointer arguments, so we can pass them on. */
6499         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6500 
6501     case PR_GET_CHILD_SUBREAPER:
6502     case PR_SET_CHILD_SUBREAPER:
6503     case PR_GET_SPECULATION_CTRL:
6504     case PR_SET_SPECULATION_CTRL:
6505     case PR_GET_TID_ADDRESS:
6506         /* TODO */
6507         return -TARGET_EINVAL;
6508 
6509     case PR_GET_FPEXC:
6510     case PR_SET_FPEXC:
6511         /* Was used for SPE on PowerPC. */
6512         return -TARGET_EINVAL;
6513 
6514     case PR_GET_ENDIAN:
6515     case PR_SET_ENDIAN:
6516     case PR_GET_FPEMU:
6517     case PR_SET_FPEMU:
6518     case PR_SET_MM:
6519     case PR_GET_SECCOMP:
6520     case PR_SET_SECCOMP:
6521     case PR_SET_SYSCALL_USER_DISPATCH:
6522     case PR_GET_THP_DISABLE:
6523     case PR_SET_THP_DISABLE:
6524     case PR_GET_TSC:
6525     case PR_SET_TSC:
6526         /* Refused, to prevent the guest from disabling features we rely on. */
6527         return -TARGET_EINVAL;
6528 
6529     default:
6530         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6531                       option);
6532         return -TARGET_EINVAL;
6533     }
6534 }
6535 
6536 #define NEW_STACK_SIZE 0x40000
6537 
6538 
6539 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
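/*
 * Per-clone handshake state passed from do_fork() to clone_func(): the
 * parent waits on 'cond' until the child has registered itself and published
 * its TID, and 'sigmask' carries the parent's original signal mask so the
 * child can restore it once its setup is complete.
 */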
6540 typedef struct {
6541     CPUArchState *env;
6542     pthread_mutex_t mutex;
6543     pthread_cond_t cond;
6544     pthread_t thread;
6545     uint32_t tid;
6546     abi_ulong child_tidptr;
6547     abi_ulong parent_tidptr;
6548     sigset_t sigmask;
6549 } new_thread_info;
6550 
6551 static void *clone_func(void *arg)
6552 {
6553     new_thread_info *info = arg;
6554     CPUArchState *env;
6555     CPUState *cpu;
6556     TaskState *ts;
6557 
6558     rcu_register_thread();
6559     tcg_register_thread();
6560     env = info->env;
6561     cpu = env_cpu(env);
6562     thread_cpu = cpu;
6563     ts = (TaskState *)cpu->opaque;
6564     info->tid = sys_gettid();
6565     task_settid(ts);
6566     if (info->child_tidptr)
6567         put_user_u32(info->tid, info->child_tidptr);
6568     if (info->parent_tidptr)
6569         put_user_u32(info->tid, info->parent_tidptr);
6570     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6571     /* Enable signals.  */
6572     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6573     /* Signal to the parent that we're ready.  */
6574     pthread_mutex_lock(&info->mutex);
6575     pthread_cond_broadcast(&info->cond);
6576     pthread_mutex_unlock(&info->mutex);
6577     /* Wait until the parent has finished initializing the tls state.  */
6578     pthread_mutex_lock(&clone_lock);
6579     pthread_mutex_unlock(&clone_lock);
6580     cpu_loop(env);
6581     /* never exits */
6582     return NULL;
6583 }
6584 
6585 /* do_fork() must return host values and target errnos (unlike most
6586    do_*() functions). */
6587 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6588                    abi_ulong parent_tidptr, target_ulong newtls,
6589                    abi_ulong child_tidptr)
6590 {
6591     CPUState *cpu = env_cpu(env);
6592     int ret;
6593     TaskState *ts;
6594     CPUState *new_cpu;
6595     CPUArchState *new_env;
6596     sigset_t sigmask;
6597 
6598     flags &= ~CLONE_IGNORED_FLAGS;
6599 
6600     /* Emulate vfork() with fork() */
6601     if (flags & CLONE_VFORK)
6602         flags &= ~(CLONE_VFORK | CLONE_VM);
6603 
6604     if (flags & CLONE_VM) {
6605         TaskState *parent_ts = (TaskState *)cpu->opaque;
6606         new_thread_info info;
6607         pthread_attr_t attr;
6608 
6609         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6610             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6611             return -TARGET_EINVAL;
6612         }
6613 
6614         ts = g_new0(TaskState, 1);
6615         init_task_state(ts);
6616 
6617         /* Grab a mutex so that thread setup appears atomic.  */
6618         pthread_mutex_lock(&clone_lock);
6619 
6620         /*
6621          * If this is our first additional thread, we need to ensure we
6622          * generate code for parallel execution and flush old translations.
6623          * Do this now so that the copy gets CF_PARALLEL too.
6624          */
6625         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6626             cpu->tcg_cflags |= CF_PARALLEL;
6627             tb_flush(cpu);
6628         }
6629 
6630         /* we create a new CPU instance. */
6631         new_env = cpu_copy(env);
6632         /* Init regs that differ from the parent.  */
6633         cpu_clone_regs_child(new_env, newsp, flags);
6634         cpu_clone_regs_parent(env, flags);
6635         new_cpu = env_cpu(new_env);
6636         new_cpu->opaque = ts;
6637         ts->bprm = parent_ts->bprm;
6638         ts->info = parent_ts->info;
6639         ts->signal_mask = parent_ts->signal_mask;
6640 
6641         if (flags & CLONE_CHILD_CLEARTID) {
6642             ts->child_tidptr = child_tidptr;
6643         }
6644 
6645         if (flags & CLONE_SETTLS) {
6646             cpu_set_tls (new_env, newtls);
6647         }
6648 
6649         memset(&info, 0, sizeof(info));
6650         pthread_mutex_init(&info.mutex, NULL);
6651         pthread_mutex_lock(&info.mutex);
6652         pthread_cond_init(&info.cond, NULL);
6653         info.env = new_env;
6654         if (flags & CLONE_CHILD_SETTID) {
6655             info.child_tidptr = child_tidptr;
6656         }
6657         if (flags & CLONE_PARENT_SETTID) {
6658             info.parent_tidptr = parent_tidptr;
6659         }
6660 
6661         ret = pthread_attr_init(&attr);
6662         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6663         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6664         /* It is not safe to deliver signals until the child has finished
6665            initializing, so temporarily block all signals.  */
6666         sigfillset(&sigmask);
6667         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6668         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6669 
6670         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6671         /* TODO: Free new CPU state if thread creation failed.  */
6672 
6673         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6674         pthread_attr_destroy(&attr);
6675         if (ret == 0) {
6676             /* Wait for the child to initialize.  */
6677             pthread_cond_wait(&info.cond, &info.mutex);
6678             ret = info.tid;
6679         } else {
6680             ret = -1;
6681         }
6682         pthread_mutex_unlock(&info.mutex);
6683         pthread_cond_destroy(&info.cond);
6684         pthread_mutex_destroy(&info.mutex);
6685         pthread_mutex_unlock(&clone_lock);
6686     } else {
6687         /* Without CLONE_VM, we treat this as a plain fork. */
6688         if (flags & CLONE_INVALID_FORK_FLAGS) {
6689             return -TARGET_EINVAL;
6690         }
6691 
6692         /* We can't support custom termination signals */
6693         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6694             return -TARGET_EINVAL;
6695         }
6696 
6697         if (block_signals()) {
6698             return -QEMU_ERESTARTSYS;
6699         }
6700 
6701         fork_start();
6702         ret = fork();
6703         if (ret == 0) {
6704             /* Child Process.  */
6705             cpu_clone_regs_child(env, newsp, flags);
6706             fork_end(1);
6707             /* There is a race condition here.  The parent process could
6708                theoretically read the TID in the child process before the child
6709                tid is set.  This would require using either ptrace
6710                (not implemented) or having *_tidptr point at a shared memory
6711                mapping.  We can't repeat the spinlock hack used above because
6712                the child process gets its own copy of the lock.  */
6713             if (flags & CLONE_CHILD_SETTID)
6714                 put_user_u32(sys_gettid(), child_tidptr);
6715             if (flags & CLONE_PARENT_SETTID)
6716                 put_user_u32(sys_gettid(), parent_tidptr);
6717             ts = (TaskState *)cpu->opaque;
6718             if (flags & CLONE_SETTLS)
6719                 cpu_set_tls (env, newtls);
6720             if (flags & CLONE_CHILD_CLEARTID)
6721                 ts->child_tidptr = child_tidptr;
6722         } else {
6723             cpu_clone_regs_parent(env, flags);
6724             fork_end(0);
6725         }
6726     }
6727     return ret;
6728 }
6729 
6730 /* Warning: does not handle Linux-specific flags... */
6731 static int target_to_host_fcntl_cmd(int cmd)
6732 {
6733     int ret;
6734 
6735     switch(cmd) {
6736     case TARGET_F_DUPFD:
6737     case TARGET_F_GETFD:
6738     case TARGET_F_SETFD:
6739     case TARGET_F_GETFL:
6740     case TARGET_F_SETFL:
6741     case TARGET_F_OFD_GETLK:
6742     case TARGET_F_OFD_SETLK:
6743     case TARGET_F_OFD_SETLKW:
6744         ret = cmd;
6745         break;
6746     case TARGET_F_GETLK:
6747         ret = F_GETLK64;
6748         break;
6749     case TARGET_F_SETLK:
6750         ret = F_SETLK64;
6751         break;
6752     case TARGET_F_SETLKW:
6753         ret = F_SETLKW64;
6754         break;
6755     case TARGET_F_GETOWN:
6756         ret = F_GETOWN;
6757         break;
6758     case TARGET_F_SETOWN:
6759         ret = F_SETOWN;
6760         break;
6761     case TARGET_F_GETSIG:
6762         ret = F_GETSIG;
6763         break;
6764     case TARGET_F_SETSIG:
6765         ret = F_SETSIG;
6766         break;
6767 #if TARGET_ABI_BITS == 32
6768     case TARGET_F_GETLK64:
6769         ret = F_GETLK64;
6770         break;
6771     case TARGET_F_SETLK64:
6772         ret = F_SETLK64;
6773         break;
6774     case TARGET_F_SETLKW64:
6775         ret = F_SETLKW64;
6776         break;
6777 #endif
6778     case TARGET_F_SETLEASE:
6779         ret = F_SETLEASE;
6780         break;
6781     case TARGET_F_GETLEASE:
6782         ret = F_GETLEASE;
6783         break;
6784 #ifdef F_DUPFD_CLOEXEC
6785     case TARGET_F_DUPFD_CLOEXEC:
6786         ret = F_DUPFD_CLOEXEC;
6787         break;
6788 #endif
6789     case TARGET_F_NOTIFY:
6790         ret = F_NOTIFY;
6791         break;
6792 #ifdef F_GETOWN_EX
6793     case TARGET_F_GETOWN_EX:
6794         ret = F_GETOWN_EX;
6795         break;
6796 #endif
6797 #ifdef F_SETOWN_EX
6798     case TARGET_F_SETOWN_EX:
6799         ret = F_SETOWN_EX;
6800         break;
6801 #endif
6802 #ifdef F_SETPIPE_SZ
6803     case TARGET_F_SETPIPE_SZ:
6804         ret = F_SETPIPE_SZ;
6805         break;
6806     case TARGET_F_GETPIPE_SZ:
6807         ret = F_GETPIPE_SZ;
6808         break;
6809 #endif
6810 #ifdef F_ADD_SEALS
6811     case TARGET_F_ADD_SEALS:
6812         ret = F_ADD_SEALS;
6813         break;
6814     case TARGET_F_GET_SEALS:
6815         ret = F_GET_SEALS;
6816         break;
6817 #endif
6818     default:
6819         ret = -TARGET_EINVAL;
6820         break;
6821     }
6822 
6823 #if defined(__powerpc64__)
6824     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6825      * 14, which the kernel does not support. The glibc fcntl wrapper adjusts
6826      * them to 5, 6 and 7 before making the syscall(). Since we make the
6827      * syscall directly, adjust to what the kernel supports.
6828      */
6829     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6830         ret -= F_GETLK64 - 5;
6831     }
6832 #endif
6833 
6834     return ret;
6835 }
6836 
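/*
 * X-macro style table: FLOCK_TRANSTBL expands to the body of a switch, and
 * each direction of conversion defines TRANSTBL_CONVERT appropriately
 * (target->host or host->target) before expanding it.
 */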
6837 #define FLOCK_TRANSTBL \
6838     switch (type) { \
6839     TRANSTBL_CONVERT(F_RDLCK); \
6840     TRANSTBL_CONVERT(F_WRLCK); \
6841     TRANSTBL_CONVERT(F_UNLCK); \
6842     }
6843 
6844 static int target_to_host_flock(int type)
6845 {
6846 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6847     FLOCK_TRANSTBL
6848 #undef  TRANSTBL_CONVERT
6849     return -TARGET_EINVAL;
6850 }
6851 
6852 static int host_to_target_flock(int type)
6853 {
6854 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6855     FLOCK_TRANSTBL
6856 #undef  TRANSTBL_CONVERT
6857     /* If we don't know how to convert the value coming from the host,
6858      * copy it to the target field as-is.
6859      */
6860     return type;
6861 }
6862 
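/*
 * Both the 32-bit target_flock and the 64-bit target_flock64 layouts are
 * converted to a host struct flock64, since target_to_host_fcntl_cmd() maps
 * the plain F_GETLK/F_SETLK/F_SETLKW commands onto their 64-bit host
 * counterparts.
 */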
6863 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6864                                             abi_ulong target_flock_addr)
6865 {
6866     struct target_flock *target_fl;
6867     int l_type;
6868 
6869     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6870         return -TARGET_EFAULT;
6871     }
6872 
6873     __get_user(l_type, &target_fl->l_type);
6874     l_type = target_to_host_flock(l_type);
6875     if (l_type < 0) {
6876         return l_type;
6877     }
6878     fl->l_type = l_type;
6879     __get_user(fl->l_whence, &target_fl->l_whence);
6880     __get_user(fl->l_start, &target_fl->l_start);
6881     __get_user(fl->l_len, &target_fl->l_len);
6882     __get_user(fl->l_pid, &target_fl->l_pid);
6883     unlock_user_struct(target_fl, target_flock_addr, 0);
6884     return 0;
6885 }
6886 
6887 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6888                                           const struct flock64 *fl)
6889 {
6890     struct target_flock *target_fl;
6891     short l_type;
6892 
6893     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6894         return -TARGET_EFAULT;
6895     }
6896 
6897     l_type = host_to_target_flock(fl->l_type);
6898     __put_user(l_type, &target_fl->l_type);
6899     __put_user(fl->l_whence, &target_fl->l_whence);
6900     __put_user(fl->l_start, &target_fl->l_start);
6901     __put_user(fl->l_len, &target_fl->l_len);
6902     __put_user(fl->l_pid, &target_fl->l_pid);
6903     unlock_user_struct(target_fl, target_flock_addr, 1);
6904     return 0;
6905 }
6906 
6907 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6908 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6909 
6910 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6911 struct target_oabi_flock64 {
6912     abi_short l_type;
6913     abi_short l_whence;
6914     abi_llong l_start;
6915     abi_llong l_len;
6916     abi_int   l_pid;
6917 } QEMU_PACKED;
6918 
6919 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6920                                                    abi_ulong target_flock_addr)
6921 {
6922     struct target_oabi_flock64 *target_fl;
6923     int l_type;
6924 
6925     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6926         return -TARGET_EFAULT;
6927     }
6928 
6929     __get_user(l_type, &target_fl->l_type);
6930     l_type = target_to_host_flock(l_type);
6931     if (l_type < 0) {
6932         return l_type;
6933     }
6934     fl->l_type = l_type;
6935     __get_user(fl->l_whence, &target_fl->l_whence);
6936     __get_user(fl->l_start, &target_fl->l_start);
6937     __get_user(fl->l_len, &target_fl->l_len);
6938     __get_user(fl->l_pid, &target_fl->l_pid);
6939     unlock_user_struct(target_fl, target_flock_addr, 0);
6940     return 0;
6941 }
6942 
6943 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6944                                                  const struct flock64 *fl)
6945 {
6946     struct target_oabi_flock64 *target_fl;
6947     short l_type;
6948 
6949     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6950         return -TARGET_EFAULT;
6951     }
6952 
6953     l_type = host_to_target_flock(fl->l_type);
6954     __put_user(l_type, &target_fl->l_type);
6955     __put_user(fl->l_whence, &target_fl->l_whence);
6956     __put_user(fl->l_start, &target_fl->l_start);
6957     __put_user(fl->l_len, &target_fl->l_len);
6958     __put_user(fl->l_pid, &target_fl->l_pid);
6959     unlock_user_struct(target_fl, target_flock_addr, 1);
6960     return 0;
6961 }
6962 #endif
6963 
6964 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6965                                               abi_ulong target_flock_addr)
6966 {
6967     struct target_flock64 *target_fl;
6968     int l_type;
6969 
6970     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6971         return -TARGET_EFAULT;
6972     }
6973 
6974     __get_user(l_type, &target_fl->l_type);
6975     l_type = target_to_host_flock(l_type);
6976     if (l_type < 0) {
6977         return l_type;
6978     }
6979     fl->l_type = l_type;
6980     __get_user(fl->l_whence, &target_fl->l_whence);
6981     __get_user(fl->l_start, &target_fl->l_start);
6982     __get_user(fl->l_len, &target_fl->l_len);
6983     __get_user(fl->l_pid, &target_fl->l_pid);
6984     unlock_user_struct(target_fl, target_flock_addr, 0);
6985     return 0;
6986 }
6987 
6988 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6989                                             const struct flock64 *fl)
6990 {
6991     struct target_flock64 *target_fl;
6992     short l_type;
6993 
6994     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6995         return -TARGET_EFAULT;
6996     }
6997 
6998     l_type = host_to_target_flock(fl->l_type);
6999     __put_user(l_type, &target_fl->l_type);
7000     __put_user(fl->l_whence, &target_fl->l_whence);
7001     __put_user(fl->l_start, &target_fl->l_start);
7002     __put_user(fl->l_len, &target_fl->l_len);
7003     __put_user(fl->l_pid, &target_fl->l_pid);
7004     unlock_user_struct(target_fl, target_flock_addr, 1);
7005     return 0;
7006 }
7007 
7008 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7009 {
7010     struct flock64 fl64;
7011 #ifdef F_GETOWN_EX
7012     struct f_owner_ex fox;
7013     struct target_f_owner_ex *target_fox;
7014 #endif
7015     abi_long ret;
7016     int host_cmd = target_to_host_fcntl_cmd(cmd);
7017 
7018     if (host_cmd == -TARGET_EINVAL)
7019         return host_cmd;
7020 
7021     switch(cmd) {
7022     case TARGET_F_GETLK:
7023         ret = copy_from_user_flock(&fl64, arg);
7024         if (ret) {
7025             return ret;
7026         }
7027         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7028         if (ret == 0) {
7029             ret = copy_to_user_flock(arg, &fl64);
7030         }
7031         break;
7032 
7033     case TARGET_F_SETLK:
7034     case TARGET_F_SETLKW:
7035         ret = copy_from_user_flock(&fl64, arg);
7036         if (ret) {
7037             return ret;
7038         }
7039         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7040         break;
7041 
7042     case TARGET_F_GETLK64:
7043     case TARGET_F_OFD_GETLK:
7044         ret = copy_from_user_flock64(&fl64, arg);
7045         if (ret) {
7046             return ret;
7047         }
7048         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7049         if (ret == 0) {
7050             ret = copy_to_user_flock64(arg, &fl64);
7051         }
7052         break;
7053     case TARGET_F_SETLK64:
7054     case TARGET_F_SETLKW64:
7055     case TARGET_F_OFD_SETLK:
7056     case TARGET_F_OFD_SETLKW:
7057         ret = copy_from_user_flock64(&fl64, arg);
7058         if (ret) {
7059             return ret;
7060         }
7061         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7062         break;
7063 
7064     case TARGET_F_GETFL:
7065         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7066         if (ret >= 0) {
7067             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7068         }
7069         break;
7070 
7071     case TARGET_F_SETFL:
7072         ret = get_errno(safe_fcntl(fd, host_cmd,
7073                                    target_to_host_bitmask(arg,
7074                                                           fcntl_flags_tbl)));
7075         break;
7076 
7077 #ifdef F_GETOWN_EX
7078     case TARGET_F_GETOWN_EX:
7079         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7080         if (ret >= 0) {
7081             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7082                 return -TARGET_EFAULT;
7083             target_fox->type = tswap32(fox.type);
7084             target_fox->pid = tswap32(fox.pid);
7085             unlock_user_struct(target_fox, arg, 1);
7086         }
7087         break;
7088 #endif
7089 
7090 #ifdef F_SETOWN_EX
7091     case TARGET_F_SETOWN_EX:
7092         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7093             return -TARGET_EFAULT;
7094         fox.type = tswap32(target_fox->type);
7095         fox.pid = tswap32(target_fox->pid);
7096         unlock_user_struct(target_fox, arg, 0);
7097         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7098         break;
7099 #endif
7100 
7101     case TARGET_F_SETSIG:
7102         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7103         break;
7104 
7105     case TARGET_F_GETSIG:
7106         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7107         break;
7108 
7109     case TARGET_F_SETOWN:
7110     case TARGET_F_GETOWN:
7111     case TARGET_F_SETLEASE:
7112     case TARGET_F_GETLEASE:
7113     case TARGET_F_SETPIPE_SZ:
7114     case TARGET_F_GETPIPE_SZ:
7115     case TARGET_F_ADD_SEALS:
7116     case TARGET_F_GET_SEALS:
7117         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7118         break;
7119 
7120     default:
7121         ret = get_errno(safe_fcntl(fd, cmd, arg));
7122         break;
7123     }
7124     return ret;
7125 }
7126 
7127 #ifdef USE_UID16
7128 
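/*
 * Helpers for the legacy 16-bit UID/GID ABI: IDs above 65535 are reported to
 * the guest as the overflow ID 65534, while a 16-bit -1 coming from the
 * guest is widened to -1 so it keeps its conventional "no change" meaning.
 */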
7129 static inline int high2lowuid(int uid)
7130 {
7131     if (uid > 65535)
7132         return 65534;
7133     else
7134         return uid;
7135 }
7136 
7137 static inline int high2lowgid(int gid)
7138 {
7139     if (gid > 65535)
7140         return 65534;
7141     else
7142         return gid;
7143 }
7144 
7145 static inline int low2highuid(int uid)
7146 {
7147     if ((int16_t)uid == -1)
7148         return -1;
7149     else
7150         return uid;
7151 }
7152 
7153 static inline int low2highgid(int gid)
7154 {
7155     if ((int16_t)gid == -1)
7156         return -1;
7157     else
7158         return gid;
7159 }
7160 static inline int tswapid(int id)
7161 {
7162     return tswap16(id);
7163 }
7164 
7165 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7166 
7167 #else /* !USE_UID16 */
7168 static inline int high2lowuid(int uid)
7169 {
7170     return uid;
7171 }
7172 static inline int high2lowgid(int gid)
7173 {
7174     return gid;
7175 }
7176 static inline int low2highuid(int uid)
7177 {
7178     return uid;
7179 }
7180 static inline int low2highgid(int gid)
7181 {
7182     return gid;
7183 }
7184 static inline int tswapid(int id)
7185 {
7186     return tswap32(id);
7187 }
7188 
7189 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7190 
7191 #endif /* USE_UID16 */
7192 
7193 /* We must do direct syscalls for setting UID/GID, because we want to
7194  * implement the Linux system call semantics of "change only for this thread",
7195  * not the libc/POSIX semantics of "change for all threads in process".
7196  * (See http://ewontfix.com/17/ for more details.)
7197  * We use the 32-bit version of the syscalls if present; if it is not
7198  * then either the host architecture supports 32-bit UIDs natively with
7199  * the standard syscall, or the 16-bit UID is the best we can do.
7200  */
7201 #ifdef __NR_setuid32
7202 #define __NR_sys_setuid __NR_setuid32
7203 #else
7204 #define __NR_sys_setuid __NR_setuid
7205 #endif
7206 #ifdef __NR_setgid32
7207 #define __NR_sys_setgid __NR_setgid32
7208 #else
7209 #define __NR_sys_setgid __NR_setgid
7210 #endif
7211 #ifdef __NR_setresuid32
7212 #define __NR_sys_setresuid __NR_setresuid32
7213 #else
7214 #define __NR_sys_setresuid __NR_setresuid
7215 #endif
7216 #ifdef __NR_setresgid32
7217 #define __NR_sys_setresgid __NR_setresgid32
7218 #else
7219 #define __NR_sys_setresgid __NR_setresgid
7220 #endif
7221 
7222 _syscall1(int, sys_setuid, uid_t, uid)
7223 _syscall1(int, sys_setgid, gid_t, gid)
7224 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7225 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7226 
7227 void syscall_init(void)
7228 {
7229     IOCTLEntry *ie;
7230     const argtype *arg_type;
7231     int size;
7232 
7233     thunk_init(STRUCT_MAX);
7234 
7235 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7236 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7237 #include "syscall_types.h"
7238 #undef STRUCT
7239 #undef STRUCT_SPECIAL
7240 
7241     /* Patch the ioctl size if necessary, relying on the fact that
7242        no ioctl has all the bits set to '1' in its size field. */
7243     ie = ioctl_entries;
7244     while (ie->target_cmd != 0) {
7245         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7246             TARGET_IOC_SIZEMASK) {
7247             arg_type = ie->arg_type;
7248             if (arg_type[0] != TYPE_PTR) {
7249                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7250                         ie->target_cmd);
7251                 exit(1);
7252             }
7253             arg_type++;
7254             size = thunk_type_size(arg_type, 0);
7255             ie->target_cmd = (ie->target_cmd &
7256                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7257                 (size << TARGET_IOC_SIZESHIFT);
7258         }
7259 
7260         /* automatic consistency check if same arch */
7261 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7262     (defined(__x86_64__) && defined(TARGET_X86_64))
7263         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7264             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7265                     ie->name, ie->target_cmd, ie->host_cmd);
7266         }
7267 #endif
7268         ie++;
7269     }
7270 }
7271 
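/*
 * On ABIs where regpairs_aligned() is true, a 64-bit syscall argument is
 * passed in an aligned (even/odd) register pair, so the slot following the
 * path or fd is padding and the real value starts one argument later; the
 * helpers below shift the arguments before combining the two halves with
 * target_offset64().
 */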
7272 #ifdef TARGET_NR_truncate64
7273 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7274                                          abi_long arg2,
7275                                          abi_long arg3,
7276                                          abi_long arg4)
7277 {
7278     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7279         arg2 = arg3;
7280         arg3 = arg4;
7281     }
7282     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7283 }
7284 #endif
7285 
7286 #ifdef TARGET_NR_ftruncate64
7287 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7288                                           abi_long arg2,
7289                                           abi_long arg3,
7290                                           abi_long arg4)
7291 {
7292     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7293         arg2 = arg3;
7294         arg3 = arg4;
7295     }
7296     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7297 }
7298 #endif
7299 
7300 #if defined(TARGET_NR_timer_settime) || \
7301     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7302 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7303                                                  abi_ulong target_addr)
7304 {
7305     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7306                                 offsetof(struct target_itimerspec,
7307                                          it_interval)) ||
7308         target_to_host_timespec(&host_its->it_value, target_addr +
7309                                 offsetof(struct target_itimerspec,
7310                                          it_value))) {
7311         return -TARGET_EFAULT;
7312     }
7313 
7314     return 0;
7315 }
7316 #endif
7317 
7318 #if defined(TARGET_NR_timer_settime64) || \
7319     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7320 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7321                                                    abi_ulong target_addr)
7322 {
7323     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7324                                   offsetof(struct target__kernel_itimerspec,
7325                                            it_interval)) ||
7326         target_to_host_timespec64(&host_its->it_value, target_addr +
7327                                   offsetof(struct target__kernel_itimerspec,
7328                                            it_value))) {
7329         return -TARGET_EFAULT;
7330     }
7331 
7332     return 0;
7333 }
7334 #endif
7335 
7336 #if ((defined(TARGET_NR_timerfd_gettime) || \
7337       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7338       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7339 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7340                                                  struct itimerspec *host_its)
7341 {
7342     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7343                                                        it_interval),
7344                                 &host_its->it_interval) ||
7345         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7346                                                        it_value),
7347                                 &host_its->it_value)) {
7348         return -TARGET_EFAULT;
7349     }
7350     return 0;
7351 }
7352 #endif
7353 
7354 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7355       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7356       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7357 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7358                                                    struct itimerspec *host_its)
7359 {
7360     if (host_to_target_timespec64(target_addr +
7361                                   offsetof(struct target__kernel_itimerspec,
7362                                            it_interval),
7363                                   &host_its->it_interval) ||
7364         host_to_target_timespec64(target_addr +
7365                                   offsetof(struct target__kernel_itimerspec,
7366                                            it_value),
7367                                   &host_its->it_value)) {
7368         return -TARGET_EFAULT;
7369     }
7370     return 0;
7371 }
7372 #endif
7373 
7374 #if defined(TARGET_NR_adjtimex) || \
7375     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7376 static inline abi_long target_to_host_timex(struct timex *host_tx,
7377                                             abi_long target_addr)
7378 {
7379     struct target_timex *target_tx;
7380 
7381     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7382         return -TARGET_EFAULT;
7383     }
7384 
7385     __get_user(host_tx->modes, &target_tx->modes);
7386     __get_user(host_tx->offset, &target_tx->offset);
7387     __get_user(host_tx->freq, &target_tx->freq);
7388     __get_user(host_tx->maxerror, &target_tx->maxerror);
7389     __get_user(host_tx->esterror, &target_tx->esterror);
7390     __get_user(host_tx->status, &target_tx->status);
7391     __get_user(host_tx->constant, &target_tx->constant);
7392     __get_user(host_tx->precision, &target_tx->precision);
7393     __get_user(host_tx->tolerance, &target_tx->tolerance);
7394     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7395     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7396     __get_user(host_tx->tick, &target_tx->tick);
7397     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7398     __get_user(host_tx->jitter, &target_tx->jitter);
7399     __get_user(host_tx->shift, &target_tx->shift);
7400     __get_user(host_tx->stabil, &target_tx->stabil);
7401     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7402     __get_user(host_tx->calcnt, &target_tx->calcnt);
7403     __get_user(host_tx->errcnt, &target_tx->errcnt);
7404     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7405     __get_user(host_tx->tai, &target_tx->tai);
7406 
7407     unlock_user_struct(target_tx, target_addr, 0);
7408     return 0;
7409 }
7410 
7411 static inline abi_long host_to_target_timex(abi_long target_addr,
7412                                             struct timex *host_tx)
7413 {
7414     struct target_timex *target_tx;
7415 
7416     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7417         return -TARGET_EFAULT;
7418     }
7419 
7420     __put_user(host_tx->modes, &target_tx->modes);
7421     __put_user(host_tx->offset, &target_tx->offset);
7422     __put_user(host_tx->freq, &target_tx->freq);
7423     __put_user(host_tx->maxerror, &target_tx->maxerror);
7424     __put_user(host_tx->esterror, &target_tx->esterror);
7425     __put_user(host_tx->status, &target_tx->status);
7426     __put_user(host_tx->constant, &target_tx->constant);
7427     __put_user(host_tx->precision, &target_tx->precision);
7428     __put_user(host_tx->tolerance, &target_tx->tolerance);
7429     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7430     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7431     __put_user(host_tx->tick, &target_tx->tick);
7432     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7433     __put_user(host_tx->jitter, &target_tx->jitter);
7434     __put_user(host_tx->shift, &target_tx->shift);
7435     __put_user(host_tx->stabil, &target_tx->stabil);
7436     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7437     __put_user(host_tx->calcnt, &target_tx->calcnt);
7438     __put_user(host_tx->errcnt, &target_tx->errcnt);
7439     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7440     __put_user(host_tx->tai, &target_tx->tai);
7441 
7442     unlock_user_struct(target_tx, target_addr, 1);
7443     return 0;
7444 }
7445 #endif
7446 
7447 
7448 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7449 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7450                                               abi_long target_addr)
7451 {
7452     struct target__kernel_timex *target_tx;
7453 
7454     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7455                                  offsetof(struct target__kernel_timex,
7456                                           time))) {
7457         return -TARGET_EFAULT;
7458     }
7459 
7460     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7461         return -TARGET_EFAULT;
7462     }
7463 
7464     __get_user(host_tx->modes, &target_tx->modes);
7465     __get_user(host_tx->offset, &target_tx->offset);
7466     __get_user(host_tx->freq, &target_tx->freq);
7467     __get_user(host_tx->maxerror, &target_tx->maxerror);
7468     __get_user(host_tx->esterror, &target_tx->esterror);
7469     __get_user(host_tx->status, &target_tx->status);
7470     __get_user(host_tx->constant, &target_tx->constant);
7471     __get_user(host_tx->precision, &target_tx->precision);
7472     __get_user(host_tx->tolerance, &target_tx->tolerance);
7473     __get_user(host_tx->tick, &target_tx->tick);
7474     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7475     __get_user(host_tx->jitter, &target_tx->jitter);
7476     __get_user(host_tx->shift, &target_tx->shift);
7477     __get_user(host_tx->stabil, &target_tx->stabil);
7478     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7479     __get_user(host_tx->calcnt, &target_tx->calcnt);
7480     __get_user(host_tx->errcnt, &target_tx->errcnt);
7481     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7482     __get_user(host_tx->tai, &target_tx->tai);
7483 
7484     unlock_user_struct(target_tx, target_addr, 0);
7485     return 0;
7486 }
7487 
7488 static inline abi_long host_to_target_timex64(abi_long target_addr,
7489                                               struct timex *host_tx)
7490 {
7491     struct target__kernel_timex *target_tx;
7492 
7493     if (copy_to_user_timeval64(target_addr +
7494                                offsetof(struct target__kernel_timex, time),
7495                                &host_tx->time)) {
7496         return -TARGET_EFAULT;
7497     }
7498 
7499     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7500         return -TARGET_EFAULT;
7501     }
7502 
7503     __put_user(host_tx->modes, &target_tx->modes);
7504     __put_user(host_tx->offset, &target_tx->offset);
7505     __put_user(host_tx->freq, &target_tx->freq);
7506     __put_user(host_tx->maxerror, &target_tx->maxerror);
7507     __put_user(host_tx->esterror, &target_tx->esterror);
7508     __put_user(host_tx->status, &target_tx->status);
7509     __put_user(host_tx->constant, &target_tx->constant);
7510     __put_user(host_tx->precision, &target_tx->precision);
7511     __put_user(host_tx->tolerance, &target_tx->tolerance);
7512     __put_user(host_tx->tick, &target_tx->tick);
7513     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7514     __put_user(host_tx->jitter, &target_tx->jitter);
7515     __put_user(host_tx->shift, &target_tx->shift);
7516     __put_user(host_tx->stabil, &target_tx->stabil);
7517     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7518     __put_user(host_tx->calcnt, &target_tx->calcnt);
7519     __put_user(host_tx->errcnt, &target_tx->errcnt);
7520     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7521     __put_user(host_tx->tai, &target_tx->tai);
7522 
7523     unlock_user_struct(target_tx, target_addr, 1);
7524     return 0;
7525 }
7526 #endif
7527 
7528 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7529 #define sigev_notify_thread_id _sigev_un._tid
7530 #endif
7531 
7532 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7533                                                abi_ulong target_addr)
7534 {
7535     struct target_sigevent *target_sevp;
7536 
7537     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7538         return -TARGET_EFAULT;
7539     }
7540 
7541     /* This union is awkward on 64-bit systems because it has a 32-bit
7542      * integer and a pointer in it; we follow the conversion approach
7543      * used for handling sigval types in signal.c so the guest should get
7544      * the correct value back even if we did a 64-bit byteswap and it's
7545      * using the 32-bit integer.
7546      */
7547     host_sevp->sigev_value.sival_ptr =
7548         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7549     host_sevp->sigev_signo =
7550         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7551     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7552     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7553 
7554     unlock_user_struct(target_sevp, target_addr, 1);
7555     return 0;
7556 }
7557 
7558 #if defined(TARGET_NR_mlockall)
7559 static inline int target_to_host_mlockall_arg(int arg)
7560 {
7561     int result = 0;
7562 
7563     if (arg & TARGET_MCL_CURRENT) {
7564         result |= MCL_CURRENT;
7565     }
7566     if (arg & TARGET_MCL_FUTURE) {
7567         result |= MCL_FUTURE;
7568     }
7569 #ifdef MCL_ONFAULT
7570     if (arg & TARGET_MCL_ONFAULT) {
7571         result |= MCL_ONFAULT;
7572     }
7573 #endif
7574 
7575     return result;
7576 }
7577 #endif
7578 
7579 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7580      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7581      defined(TARGET_NR_newfstatat))
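/*
 * Convert a host struct stat to the guest's stat64 layout. For 32-bit Arm
 * guests in EABI mode a differently laid-out variant of the structure
 * (target_eabi_stat64) is used; otherwise the generic
 * target_stat64/target_stat definition applies.
 */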
7582 static inline abi_long host_to_target_stat64(void *cpu_env,
7583                                              abi_ulong target_addr,
7584                                              struct stat *host_st)
7585 {
7586 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7587     if (((CPUARMState *)cpu_env)->eabi) {
7588         struct target_eabi_stat64 *target_st;
7589 
7590         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7591             return -TARGET_EFAULT;
7592         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7593         __put_user(host_st->st_dev, &target_st->st_dev);
7594         __put_user(host_st->st_ino, &target_st->st_ino);
7595 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7596         __put_user(host_st->st_ino, &target_st->__st_ino);
7597 #endif
7598         __put_user(host_st->st_mode, &target_st->st_mode);
7599         __put_user(host_st->st_nlink, &target_st->st_nlink);
7600         __put_user(host_st->st_uid, &target_st->st_uid);
7601         __put_user(host_st->st_gid, &target_st->st_gid);
7602         __put_user(host_st->st_rdev, &target_st->st_rdev);
7603         __put_user(host_st->st_size, &target_st->st_size);
7604         __put_user(host_st->st_blksize, &target_st->st_blksize);
7605         __put_user(host_st->st_blocks, &target_st->st_blocks);
7606         __put_user(host_st->st_atime, &target_st->target_st_atime);
7607         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7608         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7609 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7610         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7611         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7612         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7613 #endif
7614         unlock_user_struct(target_st, target_addr, 1);
7615     } else
7616 #endif
7617     {
7618 #if defined(TARGET_HAS_STRUCT_STAT64)
7619         struct target_stat64 *target_st;
7620 #else
7621         struct target_stat *target_st;
7622 #endif
7623 
7624         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7625             return -TARGET_EFAULT;
7626         memset(target_st, 0, sizeof(*target_st));
7627         __put_user(host_st->st_dev, &target_st->st_dev);
7628         __put_user(host_st->st_ino, &target_st->st_ino);
7629 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7630         __put_user(host_st->st_ino, &target_st->__st_ino);
7631 #endif
7632         __put_user(host_st->st_mode, &target_st->st_mode);
7633         __put_user(host_st->st_nlink, &target_st->st_nlink);
7634         __put_user(host_st->st_uid, &target_st->st_uid);
7635         __put_user(host_st->st_gid, &target_st->st_gid);
7636         __put_user(host_st->st_rdev, &target_st->st_rdev);
7637         /* XXX: better use of kernel struct */
7638         __put_user(host_st->st_size, &target_st->st_size);
7639         __put_user(host_st->st_blksize, &target_st->st_blksize);
7640         __put_user(host_st->st_blocks, &target_st->st_blocks);
7641         __put_user(host_st->st_atime, &target_st->target_st_atime);
7642         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7643         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7644 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7645         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7646         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7647         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7648 #endif
7649         unlock_user_struct(target_st, target_addr, 1);
7650     }
7651 
7652     return 0;
7653 }
7654 #endif
7655 
7656 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7657 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7658                                             abi_ulong target_addr)
7659 {
7660     struct target_statx *target_stx;
7661 
7662     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7663         return -TARGET_EFAULT;
7664     }
7665     memset(target_stx, 0, sizeof(*target_stx));
7666 
7667     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7668     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7669     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7670     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7671     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7672     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7673     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7674     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7675     __put_user(host_stx->stx_size, &target_stx->stx_size);
7676     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7677     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7678     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7679     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7680     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7681     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7682     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7683     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7684     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7685     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7686     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7687     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7688     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7689     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7690 
7691     unlock_user_struct(target_stx, target_addr, 1);
7692 
7693     return 0;
7694 }
7695 #endif
7696 
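/*
 * Issue a raw host futex call.  On 64-bit hosts __NR_futex already takes
 * a 64-bit time_t; on 32-bit hosts __NR_futex_time64 is preferred when
 * the host struct timespec has a 64-bit tv_sec, with the classic
 * __NR_futex as the fallback.
 */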
7697 static int do_sys_futex(int *uaddr, int op, int val,
7698                          const struct timespec *timeout, int *uaddr2,
7699                          int val3)
7700 {
7701 #if HOST_LONG_BITS == 64
7702 #if defined(__NR_futex)
7703     /* 64-bit hosts always use a 64-bit time_t; there is no _time64 variant. */
7704     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7705 
7706 #endif
7707 #else /* HOST_LONG_BITS == 64 */
7708 #if defined(__NR_futex_time64)
7709     if (sizeof(timeout->tv_sec) == 8) {
7710         /* _time64 function on 32bit arch */
7711         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7712     }
7713 #endif
7714 #if defined(__NR_futex)
7715     /* old function on 32bit arch */
7716     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7717 #endif
7718 #endif /* HOST_LONG_BITS == 64 */
7719     g_assert_not_reached();
7720 }
7721 
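/*
 * Same host-syscall selection as do_sys_futex() above, but through the
 * safe_* wrappers so a pending guest signal can interrupt a blocking
 * FUTEX_WAIT, with the result converted by get_errno().
 */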
7722 static int do_safe_futex(int *uaddr, int op, int val,
7723                          const struct timespec *timeout, int *uaddr2,
7724                          int val3)
7725 {
7726 #if HOST_LONG_BITS == 64
7727 #if defined(__NR_futex)
7728     /* 64-bit hosts always use a 64-bit time_t; there is no _time64 variant. */
7729     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7730 #endif
7731 #else /* HOST_LONG_BITS == 64 */
7732 #if defined(__NR_futex_time64)
7733     if (sizeof(timeout->tv_sec) == 8) {
7734         /* _time64 function on 32bit arch */
7735         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7736                                            val3));
7737     }
7738 #endif
7739 #if defined(__NR_futex)
7740     /* old function on 32bit arch */
7741     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7742 #endif
7743 #endif /* HOST_LONG_BITS == 64 */
7744     return -TARGET_ENOSYS;
7745 }
7746 
7747 /* ??? Using host futex calls even when target atomic operations
7748    are not really atomic probably breaks things.  However, implementing
7749    futexes locally would make futexes shared between multiple processes
7750    tricky.  In any case they're probably useless because guest atomic
7751    operations won't work either.  */
7752 #if defined(TARGET_NR_futex)
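/*
 * Handle the guest futex(2) syscall when the guest passes its native
 * struct timespec; e.g. a guest thread blocking with
 *     futex(uaddr, FUTEX_WAIT, expected, &ts, NULL, 0)
 * arrives here with 'timeout' pointing at a guest struct timespec that
 * must be converted before the host call.  For FUTEX_WAIT the expected
 * value is byte-swapped with tswap32() because the host kernel compares
 * it against raw guest memory.
 */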
7753 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7754                     target_ulong timeout, target_ulong uaddr2, int val3)
7755 {
7756     struct timespec ts, *pts;
7757     int base_op;
7758 
7759     /* ??? We assume FUTEX_* constants are the same on both host
7760        and target.  */
7761 #ifdef FUTEX_CMD_MASK
7762     base_op = op & FUTEX_CMD_MASK;
7763 #else
7764     base_op = op;
7765 #endif
7766     switch (base_op) {
7767     case FUTEX_WAIT:
7768     case FUTEX_WAIT_BITSET:
7769         if (timeout) {
7770             pts = &ts;
7771             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7772         } else {
7773             pts = NULL;
7774         }
7775         return do_safe_futex(g2h(cpu, uaddr),
7776                              op, tswap32(val), pts, NULL, val3);
7777     case FUTEX_WAKE:
7778         return do_safe_futex(g2h(cpu, uaddr),
7779                              op, val, NULL, NULL, 0);
7780     case FUTEX_FD:
7781         return do_safe_futex(g2h(cpu, uaddr),
7782                              op, val, NULL, NULL, 0);
7783     case FUTEX_REQUEUE:
7784     case FUTEX_CMP_REQUEUE:
7785     case FUTEX_WAKE_OP:
7786         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7787            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7788            But the prototype takes a `struct timespec *'; insert casts
7789            to satisfy the compiler.  We do not need to tswap TIMEOUT
7790            since it's not compared to guest memory.  */
7791         pts = (struct timespec *)(uintptr_t) timeout;
7792         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7793                              (base_op == FUTEX_CMP_REQUEUE
7794                               ? tswap32(val3) : val3));
7795     default:
7796         return -TARGET_ENOSYS;
7797     }
7798 }
7799 #endif
7800 
7801 #if defined(TARGET_NR_futex_time64)
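/*
 * Same as do_futex() above, but the guest passes a 64-bit struct
 * timespec (the _time64 flavour of the syscall).
 */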
7802 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7803                            int val, target_ulong timeout,
7804                            target_ulong uaddr2, int val3)
7805 {
7806     struct timespec ts, *pts;
7807     int base_op;
7808 
7809     /* ??? We assume FUTEX_* constants are the same on both host
7810        and target.  */
7811 #ifdef FUTEX_CMD_MASK
7812     base_op = op & FUTEX_CMD_MASK;
7813 #else
7814     base_op = op;
7815 #endif
7816     switch (base_op) {
7817     case FUTEX_WAIT:
7818     case FUTEX_WAIT_BITSET:
7819         if (timeout) {
7820             pts = &ts;
7821             if (target_to_host_timespec64(pts, timeout)) {
7822                 return -TARGET_EFAULT;
7823             }
7824         } else {
7825             pts = NULL;
7826         }
7827         return do_safe_futex(g2h(cpu, uaddr), op,
7828                              tswap32(val), pts, NULL, val3);
7829     case FUTEX_WAKE:
7830         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7831     case FUTEX_FD:
7832         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7833     case FUTEX_REQUEUE:
7834     case FUTEX_CMP_REQUEUE:
7835     case FUTEX_WAKE_OP:
7836         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7837            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7838            But the prototype takes a `struct timespec *'; insert casts
7839            to satisfy the compiler.  We do not need to tswap TIMEOUT
7840            since it's not compared to guest memory.  */
7841         pts = (struct timespec *)(uintptr_t) timeout;
7842         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7843                              (base_op == FUTEX_CMP_REQUEUE
7844                               ? tswap32(val3) : val3));
7845     default:
7846         return -TARGET_ENOSYS;
7847     }
7848 }
7849 #endif
7850 
7851 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
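/*
 * Emulate name_to_handle_at(2): read handle_bytes from the guest
 * file_handle, have the host fill in an opaque handle of that size,
 * then copy the handle back with the handle_bytes/handle_type header
 * fields byte-swapped and store the mount id at 'mount_id'.
 */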
7852 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7853                                      abi_long handle, abi_long mount_id,
7854                                      abi_long flags)
7855 {
7856     struct file_handle *target_fh;
7857     struct file_handle *fh;
7858     int mid = 0;
7859     abi_long ret;
7860     char *name;
7861     unsigned int size, total_size;
7862 
7863     if (get_user_s32(size, handle)) {
7864         return -TARGET_EFAULT;
7865     }
7866 
7867     name = lock_user_string(pathname);
7868     if (!name) {
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     total_size = sizeof(struct file_handle) + size;
7873     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7874     if (!target_fh) {
7875         unlock_user(name, pathname, 0);
7876         return -TARGET_EFAULT;
7877     }
7878 
7879     fh = g_malloc0(total_size);
7880     fh->handle_bytes = size;
7881 
7882     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7883     unlock_user(name, pathname, 0);
7884 
7885     /* man name_to_handle_at(2):
7886      * Other than the use of the handle_bytes field, the caller should treat
7887      * the file_handle structure as an opaque data type
7888      */
7889 
7890     memcpy(target_fh, fh, total_size);
7891     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7892     target_fh->handle_type = tswap32(fh->handle_type);
7893     g_free(fh);
7894     unlock_user(target_fh, handle, total_size);
7895 
7896     if (put_user_s32(mid, mount_id)) {
7897         return -TARGET_EFAULT;
7898     }
7899 
7900     return ret;
7901 
7902 }
7903 #endif
7904 
7905 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
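/*
 * Emulate open_by_handle_at(2): duplicate the guest file_handle,
 * convert its header fields and the open flags to host values, and
 * pass it to the host syscall.
 */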
7906 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7907                                      abi_long flags)
7908 {
7909     struct file_handle *target_fh;
7910     struct file_handle *fh;
7911     unsigned int size, total_size;
7912     abi_long ret;
7913 
7914     if (get_user_s32(size, handle)) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     total_size = sizeof(struct file_handle) + size;
7919     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7920     if (!target_fh) {
7921         return -TARGET_EFAULT;
7922     }
7923 
7924     fh = g_memdup(target_fh, total_size);
7925     fh->handle_bytes = size;
7926     fh->handle_type = tswap32(target_fh->handle_type);
7927 
7928     ret = get_errno(open_by_handle_at(mount_fd, fh,
7929                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7930 
7931     g_free(fh);
7932 
7933     unlock_user(target_fh, handle, total_size);
7934 
7935     return ret;
7936 }
7937 #endif
7938 
7939 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7940 
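/*
 * Shared helper for signalfd() and signalfd4(): convert the guest
 * signal mask and the O_NONBLOCK/O_CLOEXEC flags to host values, create
 * the signalfd, and register target_signalfd_trans so data read from
 * the descriptor is translated back into the guest's layout.
 */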
7941 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7942 {
7943     int host_flags;
7944     target_sigset_t *target_mask;
7945     sigset_t host_mask;
7946     abi_long ret;
7947 
7948     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7949         return -TARGET_EINVAL;
7950     }
7951     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7952         return -TARGET_EFAULT;
7953     }
7954 
7955     target_to_host_sigset(&host_mask, target_mask);
7956 
7957     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7958 
7959     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7960     if (ret >= 0) {
7961         fd_trans_register(ret, &target_signalfd_trans);
7962     }
7963 
7964     unlock_user_struct(target_mask, mask, 0);
7965 
7966     return ret;
7967 }
7968 #endif
7969 
7970 /* Map host to target signal numbers for the wait family of syscalls.
7971    Assume all other status bits are the same.  */
7972 int host_to_target_waitstatus(int status)
7973 {
7974     if (WIFSIGNALED(status)) {
7975         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7976     }
7977     if (WIFSTOPPED(status)) {
7978         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7979                | (status & 0xff);
7980     }
7981     return status;
7982 }
7983 
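/*
 * Fake /proc/self/cmdline: write the guest's saved argv[] strings,
 * each with its trailing NUL, into fd.
 */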
7984 static int open_self_cmdline(void *cpu_env, int fd)
7985 {
7986     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7987     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7988     int i;
7989 
7990     for (i = 0; i < bprm->argc; i++) {
7991         size_t len = strlen(bprm->argv[i]) + 1;
7992 
7993         if (write(fd, bprm->argv[i], len) != len) {
7994             return -1;
7995         }
7996     }
7997 
7998     return 0;
7999 }
8000 
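/*
 * Fake /proc/self/maps: walk the host's own mappings, keep only the
 * ranges that correspond to valid guest addresses, and print them with
 * guest (h2g) addresses and the guest page protection bits.  The guest
 * stack is reported as [stack].
 */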
8001 static int open_self_maps(void *cpu_env, int fd)
8002 {
8003     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8004     TaskState *ts = cpu->opaque;
8005     GSList *map_info = read_self_maps();
8006     GSList *s;
8007     int count;
8008 
8009     for (s = map_info; s; s = g_slist_next(s)) {
8010         MapInfo *e = (MapInfo *) s->data;
8011 
8012         if (h2g_valid(e->start)) {
8013             unsigned long min = e->start;
8014             unsigned long max = e->end;
8015             int flags = page_get_flags(h2g(min));
8016             const char *path;
8017 
8018             max = h2g_valid(max - 1) ?
8019                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8020 
8021             if (page_check_range(h2g(min), max - min, flags) == -1) {
8022                 continue;
8023             }
8024 
8025             if (h2g(min) == ts->info->stack_limit) {
8026                 path = "[stack]";
8027             } else {
8028                 path = e->path;
8029             }
8030 
8031             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8032                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8033                             h2g(min), h2g(max - 1) + 1,
8034                             (flags & PAGE_READ) ? 'r' : '-',
8035                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8036                             (flags & PAGE_EXEC) ? 'x' : '-',
8037                             e->is_priv ? 'p' : 's',
8038                             (uint64_t) e->offset, e->dev, e->inode);
8039             if (path) {
8040                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8041             } else {
8042                 dprintf(fd, "\n");
8043             }
8044         }
8045     }
8046 
8047     free_self_maps(map_info);
8048 
8049 #ifdef TARGET_VSYSCALL_PAGE
8050     /*
8051      * We only support execution from the vsyscall page.
8052      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8053      */
8054     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8055                     " --xp 00000000 00:00 0",
8056                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8057     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8058 #endif
8059 
8060     return 0;
8061 }
8062 
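/*
 * Fake /proc/self/stat: only the pid, comm, ppid, starttime and
 * start-of-stack fields carry real values; every other field is
 * reported as 0.
 */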
8063 static int open_self_stat(void *cpu_env, int fd)
8064 {
8065     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8066     TaskState *ts = cpu->opaque;
8067     g_autoptr(GString) buf = g_string_new(NULL);
8068     int i;
8069 
8070     for (i = 0; i < 44; i++) {
8071         if (i == 0) {
8072             /* pid */
8073             g_string_printf(buf, FMT_pid " ", getpid());
8074         } else if (i == 1) {
8075             /* app name */
8076             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8077             bin = bin ? bin + 1 : ts->bprm->argv[0];
8078             g_string_printf(buf, "(%.15s) ", bin);
8079         } else if (i == 3) {
8080             /* ppid */
8081             g_string_printf(buf, FMT_pid " ", getppid());
8082         } else if (i == 21) {
8083             /* starttime */
8084             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8085         } else if (i == 27) {
8086             /* stack bottom */
8087             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8088         } else {
8089             /* for the rest, there is MasterCard */
8090             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8091         }
8092 
8093         if (write(fd, buf->str, buf->len) != buf->len) {
8094             return -1;
8095         }
8096     }
8097 
8098     return 0;
8099 }
8100 
8101 static int open_self_auxv(void *cpu_env, int fd)
8102 {
8103     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8104     TaskState *ts = cpu->opaque;
8105     abi_ulong auxv = ts->info->saved_auxv;
8106     abi_ulong len = ts->info->auxv_len;
8107     char *ptr;
8108 
8109     /*
8110      * The auxiliary vector is stored on the target process stack.
8111      * Read the whole auxv vector and copy it to the file.
8112      */
8113     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8114     if (ptr != NULL) {
8115         while (len > 0) {
8116             ssize_t r;
8117             r = write(fd, ptr, len);
8118             if (r <= 0) {
8119                 break;
8120             }
8121             len -= r;
8122             ptr += r;
8123         }
8124         lseek(fd, 0, SEEK_SET);
8125         unlock_user(ptr, auxv, len);
8126     }
8127 
8128     return 0;
8129 }
8130 
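/*
 * Return 1 if filename names /proc/self/<entry> or /proc/<pid>/<entry>
 * for our own pid, 0 otherwise.
 */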
8131 static int is_proc_myself(const char *filename, const char *entry)
8132 {
8133     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8134         filename += strlen("/proc/");
8135         if (!strncmp(filename, "self/", strlen("self/"))) {
8136             filename += strlen("self/");
8137         } else if (*filename >= '1' && *filename <= '9') {
8138             char myself[80];
8139             snprintf(myself, sizeof(myself), "%d/", getpid());
8140             if (!strncmp(filename, myself, strlen(myself))) {
8141                 filename += strlen(myself);
8142             } else {
8143                 return 0;
8144             }
8145         } else {
8146             return 0;
8147         }
8148         if (!strcmp(filename, entry)) {
8149             return 1;
8150         }
8151     }
8152     return 0;
8153 }
8154 
8155 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8156     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8157 static int is_proc(const char *filename, const char *entry)
8158 {
8159     return strcmp(filename, entry) == 0;
8160 }
8161 #endif
8162 
8163 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
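/*
 * Fake /proc/net/route for cross-endian configurations: copy the host
 * file through while byte-swapping the destination, gateway and mask
 * words so the guest sees them in its own byte order.
 */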
8164 static int open_net_route(void *cpu_env, int fd)
8165 {
8166     FILE *fp;
8167     char *line = NULL;
8168     size_t len = 0;
8169     ssize_t read;
8170 
8171     fp = fopen("/proc/net/route", "r");
8172     if (fp == NULL) {
8173         return -1;
8174     }
8175 
8176     /* read header */
8177 
8178     read = getline(&line, &len, fp);
8179     dprintf(fd, "%s", line);
8180 
8181     /* read routes */
8182 
8183     while ((read = getline(&line, &len, fp)) != -1) {
8184         char iface[16];
8185         uint32_t dest, gw, mask;
8186         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8187         int fields;
8188 
8189         fields = sscanf(line,
8190                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8191                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8192                         &mask, &mtu, &window, &irtt);
8193         if (fields != 11) {
8194             continue;
8195         }
8196         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8197                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8198                 metric, tswap32(mask), mtu, window, irtt);
8199     }
8200 
8201     free(line);
8202     fclose(fp);
8203 
8204     return 0;
8205 }
8206 #endif
8207 
8208 #if defined(TARGET_SPARC)
8209 static int open_cpuinfo(void *cpu_env, int fd)
8210 {
8211     dprintf(fd, "type\t\t: sun4u\n");
8212     return 0;
8213 }
8214 #endif
8215 
8216 #if defined(TARGET_HPPA)
8217 static int open_cpuinfo(void *cpu_env, int fd)
8218 {
8219     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8220     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8221     dprintf(fd, "capabilities\t: os32\n");
8222     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8223     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8224     return 0;
8225 }
8226 #endif
8227 
8228 #if defined(TARGET_M68K)
8229 static int open_hardware(void *cpu_env, int fd)
8230 {
8231     dprintf(fd, "Model:\t\tqemu-m68k\n");
8232     return 0;
8233 }
8234 #endif
8235 
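/*
 * openat() with /proc interception: /proc/self/exe is redirected to the
 * real executable, and the entries in fakes[] below (e.g. a guest
 * open("/proc/self/maps", ...)) are synthesized into an unlinked
 * temporary file whose descriptor is returned to the guest.  Everything
 * else falls through to safe_openat().
 */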
8236 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8237 {
8238     struct fake_open {
8239         const char *filename;
8240         int (*fill)(void *cpu_env, int fd);
8241         int (*cmp)(const char *s1, const char *s2);
8242     };
8243     const struct fake_open *fake_open;
8244     static const struct fake_open fakes[] = {
8245         { "maps", open_self_maps, is_proc_myself },
8246         { "stat", open_self_stat, is_proc_myself },
8247         { "auxv", open_self_auxv, is_proc_myself },
8248         { "cmdline", open_self_cmdline, is_proc_myself },
8249 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8250         { "/proc/net/route", open_net_route, is_proc },
8251 #endif
8252 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8253         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8254 #endif
8255 #if defined(TARGET_M68K)
8256         { "/proc/hardware", open_hardware, is_proc },
8257 #endif
8258         { NULL, NULL, NULL }
8259     };
8260 
8261     if (is_proc_myself(pathname, "exe")) {
8262         int execfd = qemu_getauxval(AT_EXECFD);
8263         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8264     }
8265 
8266     for (fake_open = fakes; fake_open->filename; fake_open++) {
8267         if (fake_open->cmp(pathname, fake_open->filename)) {
8268             break;
8269         }
8270     }
8271 
8272     if (fake_open->filename) {
8273         const char *tmpdir;
8274         char filename[PATH_MAX];
8275         int fd, r;
8276 
8277         /* create a temporary file to hold the synthesized /proc contents */
8278         tmpdir = getenv("TMPDIR");
8279         if (!tmpdir)
8280             tmpdir = "/tmp";
8281         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8282         fd = mkstemp(filename);
8283         if (fd < 0) {
8284             return fd;
8285         }
8286         unlink(filename);
8287 
8288         if ((r = fake_open->fill(cpu_env, fd))) {
8289             int e = errno;
8290             close(fd);
8291             errno = e;
8292             return r;
8293         }
8294         lseek(fd, 0, SEEK_SET);
8295 
8296         return fd;
8297     }
8298 
8299     return safe_openat(dirfd, path(pathname), flags, mode);
8300 }
8301 
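/*
 * Timer ids handed out to the guest are expected to encode the index
 * into g_posix_timers in their low 16 bits, tagged with TIMER_MAGIC in
 * the upper bits (e.g. index 3 becomes 0x0caf0003); get_timer_id()
 * checks the tag and recovers the index.
 */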
8302 #define TIMER_MAGIC 0x0caf0000
8303 #define TIMER_MAGIC_MASK 0xffff0000
8304 
8305 /* Convert QEMU provided timer ID back to internal 16bit index format */
8306 static target_timer_t get_timer_id(abi_long arg)
8307 {
8308     target_timer_t timerid = arg;
8309 
8310     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8311         return -TARGET_EINVAL;
8312     }
8313 
8314     timerid &= 0xffff;
8315 
8316     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8317         return -TARGET_EINVAL;
8318     }
8319 
8320     return timerid;
8321 }
8322 
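/*
 * Convert a CPU affinity bitmap (as used by the sched_*affinity
 * syscalls) from guest memory, laid out as abi_ulong words, into the
 * host's array of unsigned long words.  The copy is done bit by bit so
 * differences in word size and endianness are handled uniformly;
 * host_to_target_cpu_mask() below is the reverse conversion.
 */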
8323 static int target_to_host_cpu_mask(unsigned long *host_mask,
8324                                    size_t host_size,
8325                                    abi_ulong target_addr,
8326                                    size_t target_size)
8327 {
8328     unsigned target_bits = sizeof(abi_ulong) * 8;
8329     unsigned host_bits = sizeof(*host_mask) * 8;
8330     abi_ulong *target_mask;
8331     unsigned i, j;
8332 
8333     assert(host_size >= target_size);
8334 
8335     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8336     if (!target_mask) {
8337         return -TARGET_EFAULT;
8338     }
8339     memset(host_mask, 0, host_size);
8340 
8341     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8342         unsigned bit = i * target_bits;
8343         abi_ulong val;
8344 
8345         __get_user(val, &target_mask[i]);
8346         for (j = 0; j < target_bits; j++, bit++) {
8347             if (val & (1UL << j)) {
8348                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8349             }
8350         }
8351     }
8352 
8353     unlock_user(target_mask, target_addr, 0);
8354     return 0;
8355 }
8356 
8357 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8358                                    size_t host_size,
8359                                    abi_ulong target_addr,
8360                                    size_t target_size)
8361 {
8362     unsigned target_bits = sizeof(abi_ulong) * 8;
8363     unsigned host_bits = sizeof(*host_mask) * 8;
8364     abi_ulong *target_mask;
8365     unsigned i, j;
8366 
8367     assert(host_size >= target_size);
8368 
8369     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8370     if (!target_mask) {
8371         return -TARGET_EFAULT;
8372     }
8373 
8374     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8375         unsigned bit = i * target_bits;
8376         abi_ulong val = 0;
8377 
8378         for (j = 0; j < target_bits; j++, bit++) {
8379             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8380                 val |= 1UL << j;
8381             }
8382         }
8383         __put_user(val, &target_mask[i]);
8384     }
8385 
8386     unlock_user(target_mask, target_addr, target_size);
8387     return 0;
8388 }
8389 
8390 #ifdef TARGET_NR_getdents
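/*
 * Emulate getdents(2): read host directory records (via getdents or
 * getdents64, depending on EMULATE_GETDENTS_WITH_GETDENTS) and repack
 * each one into the target's struct target_dirent layout, storing
 * d_type in the final byte of the record.  If the repacked records
 * would overflow the guest buffer, seek back to the first entry not
 * returned and hand back only what fits.
 */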
8391 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8392 {
8393     g_autofree void *hdirp = NULL;
8394     void *tdirp;
8395     int hlen, hoff, toff;
8396     int hreclen, treclen;
8397     off64_t prev_diroff = 0;
8398 
8399     hdirp = g_try_malloc(count);
8400     if (!hdirp) {
8401         return -TARGET_ENOMEM;
8402     }
8403 
8404 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8405     hlen = sys_getdents(dirfd, hdirp, count);
8406 #else
8407     hlen = sys_getdents64(dirfd, hdirp, count);
8408 #endif
8409 
8410     hlen = get_errno(hlen);
8411     if (is_error(hlen)) {
8412         return hlen;
8413     }
8414 
8415     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8416     if (!tdirp) {
8417         return -TARGET_EFAULT;
8418     }
8419 
8420     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8421 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8422         struct linux_dirent *hde = hdirp + hoff;
8423 #else
8424         struct linux_dirent64 *hde = hdirp + hoff;
8425 #endif
8426         struct target_dirent *tde = tdirp + toff;
8427         int namelen;
8428         uint8_t type;
8429 
8430         namelen = strlen(hde->d_name);
8431         hreclen = hde->d_reclen;
8432         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8433         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8434 
8435         if (toff + treclen > count) {
8436             /*
8437              * If the host struct is smaller than the target struct, or
8438              * requires less alignment and thus packs into less space,
8439              * then the host can return more entries than we can pass
8440              * on to the guest.
8441              */
8442             if (toff == 0) {
8443                 toff = -TARGET_EINVAL; /* result buffer is too small */
8444                 break;
8445             }
8446             /*
8447              * Return what we have, resetting the file pointer to the
8448              * location of the first record not returned.
8449              */
8450             lseek64(dirfd, prev_diroff, SEEK_SET);
8451             break;
8452         }
8453 
8454         prev_diroff = hde->d_off;
8455         tde->d_ino = tswapal(hde->d_ino);
8456         tde->d_off = tswapal(hde->d_off);
8457         tde->d_reclen = tswap16(treclen);
8458         memcpy(tde->d_name, hde->d_name, namelen + 1);
8459 
8460         /*
8461          * The getdents type is in what was formerly a padding byte at the
8462          * end of the structure.
8463          */
8464 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8465         type = *((uint8_t *)hde + hreclen - 1);
8466 #else
8467         type = hde->d_type;
8468 #endif
8469         *((uint8_t *)tde + treclen - 1) = type;
8470     }
8471 
8472     unlock_user(tdirp, arg2, toff);
8473     return toff;
8474 }
8475 #endif /* TARGET_NR_getdents */
8476 
8477 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
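/*
 * Same repacking as do_getdents() above, but both sides use the
 * dirent64 layout, so only field widths, alignment and byte order need
 * adjusting.
 */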
8478 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8479 {
8480     g_autofree void *hdirp = NULL;
8481     void *tdirp;
8482     int hlen, hoff, toff;
8483     int hreclen, treclen;
8484     off64_t prev_diroff = 0;
8485 
8486     hdirp = g_try_malloc(count);
8487     if (!hdirp) {
8488         return -TARGET_ENOMEM;
8489     }
8490 
8491     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8492     if (is_error(hlen)) {
8493         return hlen;
8494     }
8495 
8496     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8497     if (!tdirp) {
8498         return -TARGET_EFAULT;
8499     }
8500 
8501     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8502         struct linux_dirent64 *hde = hdirp + hoff;
8503         struct target_dirent64 *tde = tdirp + toff;
8504         int namelen;
8505 
8506         namelen = strlen(hde->d_name) + 1;
8507         hreclen = hde->d_reclen;
8508         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8509         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8510 
8511         if (toff + treclen > count) {
8512             /*
8513              * If the host struct is smaller than the target struct, or
8514              * requires less alignment and thus packs into less space,
8515              * then the host can return more entries than we can pass
8516              * on to the guest.
8517              */
8518             if (toff == 0) {
8519                 toff = -TARGET_EINVAL; /* result buffer is too small */
8520                 break;
8521             }
8522             /*
8523              * Return what we have, resetting the file pointer to the
8524              * location of the first record not returned.
8525              */
8526             lseek64(dirfd, prev_diroff, SEEK_SET);
8527             break;
8528         }
8529 
8530         prev_diroff = hde->d_off;
8531         tde->d_ino = tswap64(hde->d_ino);
8532         tde->d_off = tswap64(hde->d_off);
8533         tde->d_reclen = tswap16(treclen);
8534         tde->d_type = hde->d_type;
8535         memcpy(tde->d_name, hde->d_name, namelen);
8536     }
8537 
8538     unlock_user(tdirp, arg2, toff);
8539     return toff;
8540 }
8541 #endif /* TARGET_NR_getdents64 */
8542 
8543 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8544 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8545 #endif
8546 
8547 /* This is an internal helper for do_syscall so that it is easier
8548  * to have a single return point, so that actions, such as logging
8549  * of syscall results, can be performed.
8550  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8551  */
8552 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8553                             abi_long arg2, abi_long arg3, abi_long arg4,
8554                             abi_long arg5, abi_long arg6, abi_long arg7,
8555                             abi_long arg8)
8556 {
8557     CPUState *cpu = env_cpu(cpu_env);
8558     abi_long ret;
8559 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8560     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8561     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8562     || defined(TARGET_NR_statx)
8563     struct stat st;
8564 #endif
8565 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8566     || defined(TARGET_NR_fstatfs)
8567     struct statfs stfs;
8568 #endif
8569     void *p;
8570 
8571     switch(num) {
8572     case TARGET_NR_exit:
8573         /* In old applications this may be used to implement _exit(2).
8574            However in threaded applications it is used for thread termination,
8575            and _exit_group is used for application termination.
8576            Do thread termination if we have more than one thread.  */
8577 
8578         if (block_signals()) {
8579             return -QEMU_ERESTARTSYS;
8580         }
8581 
8582         pthread_mutex_lock(&clone_lock);
8583 
8584         if (CPU_NEXT(first_cpu)) {
8585             TaskState *ts = cpu->opaque;
8586 
8587             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8588             object_unref(OBJECT(cpu));
8589             /*
8590              * At this point the CPU should be unrealized and removed
8591              * from cpu lists. We can clean-up the rest of the thread
8592              * data without the lock held.
8593              */
8594 
8595             pthread_mutex_unlock(&clone_lock);
8596 
8597             if (ts->child_tidptr) {
8598                 put_user_u32(0, ts->child_tidptr);
8599                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8600                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8601             }
8602             thread_cpu = NULL;
8603             g_free(ts);
8604             rcu_unregister_thread();
8605             pthread_exit(NULL);
8606         }
8607 
8608         pthread_mutex_unlock(&clone_lock);
8609         preexit_cleanup(cpu_env, arg1);
8610         _exit(arg1);
8611         return 0; /* avoid warning */
8612     case TARGET_NR_read:
8613         if (arg2 == 0 && arg3 == 0) {
8614             return get_errno(safe_read(arg1, 0, 0));
8615         } else {
8616             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8617                 return -TARGET_EFAULT;
8618             ret = get_errno(safe_read(arg1, p, arg3));
8619             if (ret >= 0 &&
8620                 fd_trans_host_to_target_data(arg1)) {
8621                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8622             }
8623             unlock_user(p, arg2, ret);
8624         }
8625         return ret;
8626     case TARGET_NR_write:
8627         if (arg2 == 0 && arg3 == 0) {
8628             return get_errno(safe_write(arg1, 0, 0));
8629         }
8630         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8631             return -TARGET_EFAULT;
8632         if (fd_trans_target_to_host_data(arg1)) {
8633             void *copy = g_malloc(arg3);
8634             memcpy(copy, p, arg3);
8635             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8636             if (ret >= 0) {
8637                 ret = get_errno(safe_write(arg1, copy, ret));
8638             }
8639             g_free(copy);
8640         } else {
8641             ret = get_errno(safe_write(arg1, p, arg3));
8642         }
8643         unlock_user(p, arg2, 0);
8644         return ret;
8645 
8646 #ifdef TARGET_NR_open
8647     case TARGET_NR_open:
8648         if (!(p = lock_user_string(arg1)))
8649             return -TARGET_EFAULT;
8650         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8651                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8652                                   arg3));
8653         fd_trans_unregister(ret);
8654         unlock_user(p, arg1, 0);
8655         return ret;
8656 #endif
8657     case TARGET_NR_openat:
8658         if (!(p = lock_user_string(arg2)))
8659             return -TARGET_EFAULT;
8660         ret = get_errno(do_openat(cpu_env, arg1, p,
8661                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8662                                   arg4));
8663         fd_trans_unregister(ret);
8664         unlock_user(p, arg2, 0);
8665         return ret;
8666 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667     case TARGET_NR_name_to_handle_at:
8668         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8669         return ret;
8670 #endif
8671 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8672     case TARGET_NR_open_by_handle_at:
8673         ret = do_open_by_handle_at(arg1, arg2, arg3);
8674         fd_trans_unregister(ret);
8675         return ret;
8676 #endif
8677     case TARGET_NR_close:
8678         fd_trans_unregister(arg1);
8679         return get_errno(close(arg1));
8680 
8681     case TARGET_NR_brk:
8682         return do_brk(arg1);
8683 #ifdef TARGET_NR_fork
8684     case TARGET_NR_fork:
8685         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8686 #endif
8687 #ifdef TARGET_NR_waitpid
8688     case TARGET_NR_waitpid:
8689         {
8690             int status;
8691             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8692             if (!is_error(ret) && arg2 && ret
8693                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8694                 return -TARGET_EFAULT;
8695         }
8696         return ret;
8697 #endif
8698 #ifdef TARGET_NR_waitid
8699     case TARGET_NR_waitid:
8700         {
8701             siginfo_t info;
8702             info.si_pid = 0;
8703             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8704             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8705                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8706                     return -TARGET_EFAULT;
8707                 host_to_target_siginfo(p, &info);
8708                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8709             }
8710         }
8711         return ret;
8712 #endif
8713 #ifdef TARGET_NR_creat /* not on alpha */
8714     case TARGET_NR_creat:
8715         if (!(p = lock_user_string(arg1)))
8716             return -TARGET_EFAULT;
8717         ret = get_errno(creat(p, arg2));
8718         fd_trans_unregister(ret);
8719         unlock_user(p, arg1, 0);
8720         return ret;
8721 #endif
8722 #ifdef TARGET_NR_link
8723     case TARGET_NR_link:
8724         {
8725             void * p2;
8726             p = lock_user_string(arg1);
8727             p2 = lock_user_string(arg2);
8728             if (!p || !p2)
8729                 ret = -TARGET_EFAULT;
8730             else
8731                 ret = get_errno(link(p, p2));
8732             unlock_user(p2, arg2, 0);
8733             unlock_user(p, arg1, 0);
8734         }
8735         return ret;
8736 #endif
8737 #if defined(TARGET_NR_linkat)
8738     case TARGET_NR_linkat:
8739         {
8740             void * p2 = NULL;
8741             if (!arg2 || !arg4)
8742                 return -TARGET_EFAULT;
8743             p  = lock_user_string(arg2);
8744             p2 = lock_user_string(arg4);
8745             if (!p || !p2)
8746                 ret = -TARGET_EFAULT;
8747             else
8748                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8749             unlock_user(p, arg2, 0);
8750             unlock_user(p2, arg4, 0);
8751         }
8752         return ret;
8753 #endif
8754 #ifdef TARGET_NR_unlink
8755     case TARGET_NR_unlink:
8756         if (!(p = lock_user_string(arg1)))
8757             return -TARGET_EFAULT;
8758         ret = get_errno(unlink(p));
8759         unlock_user(p, arg1, 0);
8760         return ret;
8761 #endif
8762 #if defined(TARGET_NR_unlinkat)
8763     case TARGET_NR_unlinkat:
8764         if (!(p = lock_user_string(arg2)))
8765             return -TARGET_EFAULT;
8766         ret = get_errno(unlinkat(arg1, p, arg3));
8767         unlock_user(p, arg2, 0);
8768         return ret;
8769 #endif
8770     case TARGET_NR_execve:
8771         {
8772             char **argp, **envp;
8773             int argc, envc;
8774             abi_ulong gp;
8775             abi_ulong guest_argp;
8776             abi_ulong guest_envp;
8777             abi_ulong addr;
8778             char **q;
8779 
8780             argc = 0;
8781             guest_argp = arg2;
8782             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8783                 if (get_user_ual(addr, gp))
8784                     return -TARGET_EFAULT;
8785                 if (!addr)
8786                     break;
8787                 argc++;
8788             }
8789             envc = 0;
8790             guest_envp = arg3;
8791             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8792                 if (get_user_ual(addr, gp))
8793                     return -TARGET_EFAULT;
8794                 if (!addr)
8795                     break;
8796                 envc++;
8797             }
8798 
8799             argp = g_new0(char *, argc + 1);
8800             envp = g_new0(char *, envc + 1);
8801 
8802             for (gp = guest_argp, q = argp; gp;
8803                   gp += sizeof(abi_ulong), q++) {
8804                 if (get_user_ual(addr, gp))
8805                     goto execve_efault;
8806                 if (!addr)
8807                     break;
8808                 if (!(*q = lock_user_string(addr)))
8809                     goto execve_efault;
8810             }
8811             *q = NULL;
8812 
8813             for (gp = guest_envp, q = envp; gp;
8814                   gp += sizeof(abi_ulong), q++) {
8815                 if (get_user_ual(addr, gp))
8816                     goto execve_efault;
8817                 if (!addr)
8818                     break;
8819                 if (!(*q = lock_user_string(addr)))
8820                     goto execve_efault;
8821             }
8822             *q = NULL;
8823 
8824             if (!(p = lock_user_string(arg1)))
8825                 goto execve_efault;
8826             /* Although execve() is not an interruptible syscall it is
8827              * a special case where we must use the safe_syscall wrapper:
8828              * if we allow a signal to happen before we make the host
8829              * syscall then we will 'lose' it, because at the point of
8830              * execve the process leaves QEMU's control. So we use the
8831              * safe syscall wrapper to ensure that we either take the
8832              * signal as a guest signal, or else it does not happen
8833              * before the execve completes and makes it the other
8834              * program's problem.
8835              */
8836             ret = get_errno(safe_execve(p, argp, envp));
8837             unlock_user(p, arg1, 0);
8838 
8839             goto execve_end;
8840 
8841         execve_efault:
8842             ret = -TARGET_EFAULT;
8843 
8844         execve_end:
8845             for (gp = guest_argp, q = argp; *q;
8846                   gp += sizeof(abi_ulong), q++) {
8847                 if (get_user_ual(addr, gp)
8848                     || !addr)
8849                     break;
8850                 unlock_user(*q, addr, 0);
8851             }
8852             for (gp = guest_envp, q = envp; *q;
8853                   gp += sizeof(abi_ulong), q++) {
8854                 if (get_user_ual(addr, gp)
8855                     || !addr)
8856                     break;
8857                 unlock_user(*q, addr, 0);
8858             }
8859 
8860             g_free(argp);
8861             g_free(envp);
8862         }
8863         return ret;
8864     case TARGET_NR_chdir:
8865         if (!(p = lock_user_string(arg1)))
8866             return -TARGET_EFAULT;
8867         ret = get_errno(chdir(p));
8868         unlock_user(p, arg1, 0);
8869         return ret;
8870 #ifdef TARGET_NR_time
8871     case TARGET_NR_time:
8872         {
8873             time_t host_time;
8874             ret = get_errno(time(&host_time));
8875             if (!is_error(ret)
8876                 && arg1
8877                 && put_user_sal(host_time, arg1))
8878                 return -TARGET_EFAULT;
8879         }
8880         return ret;
8881 #endif
8882 #ifdef TARGET_NR_mknod
8883     case TARGET_NR_mknod:
8884         if (!(p = lock_user_string(arg1)))
8885             return -TARGET_EFAULT;
8886         ret = get_errno(mknod(p, arg2, arg3));
8887         unlock_user(p, arg1, 0);
8888         return ret;
8889 #endif
8890 #if defined(TARGET_NR_mknodat)
8891     case TARGET_NR_mknodat:
8892         if (!(p = lock_user_string(arg2)))
8893             return -TARGET_EFAULT;
8894         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8895         unlock_user(p, arg2, 0);
8896         return ret;
8897 #endif
8898 #ifdef TARGET_NR_chmod
8899     case TARGET_NR_chmod:
8900         if (!(p = lock_user_string(arg1)))
8901             return -TARGET_EFAULT;
8902         ret = get_errno(chmod(p, arg2));
8903         unlock_user(p, arg1, 0);
8904         return ret;
8905 #endif
8906 #ifdef TARGET_NR_lseek
8907     case TARGET_NR_lseek:
8908         return get_errno(lseek(arg1, arg2, arg3));
8909 #endif
8910 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8911     /* Alpha specific */
8912     case TARGET_NR_getxpid:
8913         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8914         return get_errno(getpid());
8915 #endif
8916 #ifdef TARGET_NR_getpid
8917     case TARGET_NR_getpid:
8918         return get_errno(getpid());
8919 #endif
8920     case TARGET_NR_mount:
8921         {
8922             /* need to look at the data field */
8923             void *p2, *p3;
8924 
8925             if (arg1) {
8926                 p = lock_user_string(arg1);
8927                 if (!p) {
8928                     return -TARGET_EFAULT;
8929                 }
8930             } else {
8931                 p = NULL;
8932             }
8933 
8934             p2 = lock_user_string(arg2);
8935             if (!p2) {
8936                 if (arg1) {
8937                     unlock_user(p, arg1, 0);
8938                 }
8939                 return -TARGET_EFAULT;
8940             }
8941 
8942             if (arg3) {
8943                 p3 = lock_user_string(arg3);
8944                 if (!p3) {
8945                     if (arg1) {
8946                         unlock_user(p, arg1, 0);
8947                     }
8948                     unlock_user(p2, arg2, 0);
8949                     return -TARGET_EFAULT;
8950                 }
8951             } else {
8952                 p3 = NULL;
8953             }
8954 
8955             /* FIXME - arg5 should be locked, but it isn't clear how to
8956              * do that since it's not guaranteed to be a NULL-terminated
8957              * string.
8958              */
8959             if (!arg5) {
8960                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8961             } else {
8962                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8963             }
8964             ret = get_errno(ret);
8965 
8966             if (arg1) {
8967                 unlock_user(p, arg1, 0);
8968             }
8969             unlock_user(p2, arg2, 0);
8970             if (arg3) {
8971                 unlock_user(p3, arg3, 0);
8972             }
8973         }
8974         return ret;
8975 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8976 #if defined(TARGET_NR_umount)
8977     case TARGET_NR_umount:
8978 #endif
8979 #if defined(TARGET_NR_oldumount)
8980     case TARGET_NR_oldumount:
8981 #endif
8982         if (!(p = lock_user_string(arg1)))
8983             return -TARGET_EFAULT;
8984         ret = get_errno(umount(p));
8985         unlock_user(p, arg1, 0);
8986         return ret;
8987 #endif
8988 #ifdef TARGET_NR_stime /* not on alpha */
8989     case TARGET_NR_stime:
8990         {
8991             struct timespec ts;
8992             ts.tv_nsec = 0;
8993             if (get_user_sal(ts.tv_sec, arg1)) {
8994                 return -TARGET_EFAULT;
8995             }
8996             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8997         }
8998 #endif
8999 #ifdef TARGET_NR_alarm /* not on alpha */
9000     case TARGET_NR_alarm:
9001         return alarm(arg1);
9002 #endif
9003 #ifdef TARGET_NR_pause /* not on alpha */
9004     case TARGET_NR_pause:
9005         if (!block_signals()) {
9006             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9007         }
9008         return -TARGET_EINTR;
9009 #endif
9010 #ifdef TARGET_NR_utime
9011     case TARGET_NR_utime:
9012         {
9013             struct utimbuf tbuf, *host_tbuf;
9014             struct target_utimbuf *target_tbuf;
9015             if (arg2) {
9016                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9017                     return -TARGET_EFAULT;
9018                 tbuf.actime = tswapal(target_tbuf->actime);
9019                 tbuf.modtime = tswapal(target_tbuf->modtime);
9020                 unlock_user_struct(target_tbuf, arg2, 0);
9021                 host_tbuf = &tbuf;
9022             } else {
9023                 host_tbuf = NULL;
9024             }
9025             if (!(p = lock_user_string(arg1)))
9026                 return -TARGET_EFAULT;
9027             ret = get_errno(utime(p, host_tbuf));
9028             unlock_user(p, arg1, 0);
9029         }
9030         return ret;
9031 #endif
9032 #ifdef TARGET_NR_utimes
9033     case TARGET_NR_utimes:
9034         {
9035             struct timeval *tvp, tv[2];
9036             if (arg2) {
9037                 if (copy_from_user_timeval(&tv[0], arg2)
9038                     || copy_from_user_timeval(&tv[1],
9039                                               arg2 + sizeof(struct target_timeval)))
9040                     return -TARGET_EFAULT;
9041                 tvp = tv;
9042             } else {
9043                 tvp = NULL;
9044             }
9045             if (!(p = lock_user_string(arg1)))
9046                 return -TARGET_EFAULT;
9047             ret = get_errno(utimes(p, tvp));
9048             unlock_user(p, arg1, 0);
9049         }
9050         return ret;
9051 #endif
9052 #if defined(TARGET_NR_futimesat)
9053     case TARGET_NR_futimesat:
9054         {
9055             struct timeval *tvp, tv[2];
9056             if (arg3) {
9057                 if (copy_from_user_timeval(&tv[0], arg3)
9058                     || copy_from_user_timeval(&tv[1],
9059                                               arg3 + sizeof(struct target_timeval)))
9060                     return -TARGET_EFAULT;
9061                 tvp = tv;
9062             } else {
9063                 tvp = NULL;
9064             }
9065             if (!(p = lock_user_string(arg2))) {
9066                 return -TARGET_EFAULT;
9067             }
9068             ret = get_errno(futimesat(arg1, path(p), tvp));
9069             unlock_user(p, arg2, 0);
9070         }
9071         return ret;
9072 #endif
9073 #ifdef TARGET_NR_access
9074     case TARGET_NR_access:
9075         if (!(p = lock_user_string(arg1))) {
9076             return -TARGET_EFAULT;
9077         }
9078         ret = get_errno(access(path(p), arg2));
9079         unlock_user(p, arg1, 0);
9080         return ret;
9081 #endif
9082 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9083     case TARGET_NR_faccessat:
9084         if (!(p = lock_user_string(arg2))) {
9085             return -TARGET_EFAULT;
9086         }
9087         ret = get_errno(faccessat(arg1, p, arg3, 0));
9088         unlock_user(p, arg2, 0);
9089         return ret;
9090 #endif
9091 #ifdef TARGET_NR_nice /* not on alpha */
9092     case TARGET_NR_nice:
9093         return get_errno(nice(arg1));
9094 #endif
9095     case TARGET_NR_sync:
9096         sync();
9097         return 0;
9098 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9099     case TARGET_NR_syncfs:
9100         return get_errno(syncfs(arg1));
9101 #endif
9102     case TARGET_NR_kill:
9103         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9104 #ifdef TARGET_NR_rename
9105     case TARGET_NR_rename:
9106         {
9107             void *p2;
9108             p = lock_user_string(arg1);
9109             p2 = lock_user_string(arg2);
9110             if (!p || !p2)
9111                 ret = -TARGET_EFAULT;
9112             else
9113                 ret = get_errno(rename(p, p2));
9114             unlock_user(p2, arg2, 0);
9115             unlock_user(p, arg1, 0);
9116         }
9117         return ret;
9118 #endif
9119 #if defined(TARGET_NR_renameat)
9120     case TARGET_NR_renameat:
9121         {
9122             void *p2;
9123             p  = lock_user_string(arg2);
9124             p2 = lock_user_string(arg4);
9125             if (!p || !p2)
9126                 ret = -TARGET_EFAULT;
9127             else
9128                 ret = get_errno(renameat(arg1, p, arg3, p2));
9129             unlock_user(p2, arg4, 0);
9130             unlock_user(p, arg2, 0);
9131         }
9132         return ret;
9133 #endif
9134 #if defined(TARGET_NR_renameat2)
9135     case TARGET_NR_renameat2:
9136         {
9137             void *p2;
9138             p  = lock_user_string(arg2);
9139             p2 = lock_user_string(arg4);
9140             if (!p || !p2) {
9141                 ret = -TARGET_EFAULT;
9142             } else {
9143                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9144             }
9145             unlock_user(p2, arg4, 0);
9146             unlock_user(p, arg2, 0);
9147         }
9148         return ret;
9149 #endif
9150 #ifdef TARGET_NR_mkdir
9151     case TARGET_NR_mkdir:
9152         if (!(p = lock_user_string(arg1)))
9153             return -TARGET_EFAULT;
9154         ret = get_errno(mkdir(p, arg2));
9155         unlock_user(p, arg1, 0);
9156         return ret;
9157 #endif
9158 #if defined(TARGET_NR_mkdirat)
9159     case TARGET_NR_mkdirat:
9160         if (!(p = lock_user_string(arg2)))
9161             return -TARGET_EFAULT;
9162         ret = get_errno(mkdirat(arg1, p, arg3));
9163         unlock_user(p, arg2, 0);
9164         return ret;
9165 #endif
9166 #ifdef TARGET_NR_rmdir
9167     case TARGET_NR_rmdir:
9168         if (!(p = lock_user_string(arg1)))
9169             return -TARGET_EFAULT;
9170         ret = get_errno(rmdir(p));
9171         unlock_user(p, arg1, 0);
9172         return ret;
9173 #endif
9174     case TARGET_NR_dup:
9175         ret = get_errno(dup(arg1));
9176         if (ret >= 0) {
9177             fd_trans_dup(arg1, ret);
9178         }
9179         return ret;
9180 #ifdef TARGET_NR_pipe
9181     case TARGET_NR_pipe:
9182         return do_pipe(cpu_env, arg1, 0, 0);
9183 #endif
9184 #ifdef TARGET_NR_pipe2
9185     case TARGET_NR_pipe2:
9186         return do_pipe(cpu_env, arg1,
9187                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9188 #endif
9189     case TARGET_NR_times:
9190         {
9191             struct target_tms *tmsp;
9192             struct tms tms;
9193             ret = get_errno(times(&tms));
9194             if (arg1) {
9195                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9196                 if (!tmsp)
9197                     return -TARGET_EFAULT;
9198                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9199                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9200                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9201                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9202             }
9203             if (!is_error(ret))
9204                 ret = host_to_target_clock_t(ret);
9205         }
9206         return ret;
9207     case TARGET_NR_acct:
9208         if (arg1 == 0) {
9209             ret = get_errno(acct(NULL));
9210         } else {
9211             if (!(p = lock_user_string(arg1))) {
9212                 return -TARGET_EFAULT;
9213             }
9214             ret = get_errno(acct(path(p)));
9215             unlock_user(p, arg1, 0);
9216         }
9217         return ret;
9218 #ifdef TARGET_NR_umount2
9219     case TARGET_NR_umount2:
9220         if (!(p = lock_user_string(arg1)))
9221             return -TARGET_EFAULT;
9222         ret = get_errno(umount2(p, arg2));
9223         unlock_user(p, arg1, 0);
9224         return ret;
9225 #endif
9226     case TARGET_NR_ioctl:
9227         return do_ioctl(arg1, arg2, arg3);
9228 #ifdef TARGET_NR_fcntl
9229     case TARGET_NR_fcntl:
9230         return do_fcntl(arg1, arg2, arg3);
9231 #endif
9232     case TARGET_NR_setpgid:
9233         return get_errno(setpgid(arg1, arg2));
9234     case TARGET_NR_umask:
9235         return get_errno(umask(arg1));
9236     case TARGET_NR_chroot:
9237         if (!(p = lock_user_string(arg1)))
9238             return -TARGET_EFAULT;
9239         ret = get_errno(chroot(p));
9240         unlock_user(p, arg1, 0);
9241         return ret;
9242 #ifdef TARGET_NR_dup2
9243     case TARGET_NR_dup2:
9244         ret = get_errno(dup2(arg1, arg2));
9245         if (ret >= 0) {
9246             fd_trans_dup(arg1, arg2);
9247         }
9248         return ret;
9249 #endif
9250 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9251     case TARGET_NR_dup3:
9252     {
9253         int host_flags;
9254 
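        /* O_CLOEXEC is the only flag dup3() accepts; reject anything else
         * before translating the bitmask to its host value. */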
9255         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9256             return -TARGET_EINVAL;
9257         }
9258         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9259         ret = get_errno(dup3(arg1, arg2, host_flags));
9260         if (ret >= 0) {
9261             fd_trans_dup(arg1, arg2);
9262         }
9263         return ret;
9264     }
9265 #endif
9266 #ifdef TARGET_NR_getppid /* not on alpha */
9267     case TARGET_NR_getppid:
9268         return get_errno(getppid());
9269 #endif
9270 #ifdef TARGET_NR_getpgrp
9271     case TARGET_NR_getpgrp:
9272         return get_errno(getpgrp());
9273 #endif
9274     case TARGET_NR_setsid:
9275         return get_errno(setsid());
9276 #ifdef TARGET_NR_sigaction
9277     case TARGET_NR_sigaction:
9278         {
9279 #if defined(TARGET_MIPS)
9280             struct target_sigaction act, oact, *pact, *old_act;
9281 
9282             if (arg2) {
9283                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9284                     return -TARGET_EFAULT;
9285                 act._sa_handler = old_act->_sa_handler;
9286                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9287                 act.sa_flags = old_act->sa_flags;
9288                 unlock_user_struct(old_act, arg2, 0);
9289                 pact = &act;
9290             } else {
9291                 pact = NULL;
9292             }
9293 
9294             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9295 
9296             if (!is_error(ret) && arg3) {
9297                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9298                     return -TARGET_EFAULT;
9299                 old_act->_sa_handler = oact._sa_handler;
9300                 old_act->sa_flags = oact.sa_flags;
9301                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9302                 old_act->sa_mask.sig[1] = 0;
9303                 old_act->sa_mask.sig[2] = 0;
9304                 old_act->sa_mask.sig[3] = 0;
9305                 unlock_user_struct(old_act, arg3, 1);
9306             }
9307 #else
9308             struct target_old_sigaction *old_act;
9309             struct target_sigaction act, oact, *pact;
9310             if (arg2) {
9311                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9312                     return -TARGET_EFAULT;
9313                 act._sa_handler = old_act->_sa_handler;
9314                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9315                 act.sa_flags = old_act->sa_flags;
9316 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9317                 act.sa_restorer = old_act->sa_restorer;
9318 #endif
9319                 unlock_user_struct(old_act, arg2, 0);
9320                 pact = &act;
9321             } else {
9322                 pact = NULL;
9323             }
9324             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9325             if (!is_error(ret) && arg3) {
9326                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9327                     return -TARGET_EFAULT;
9328                 old_act->_sa_handler = oact._sa_handler;
9329                 old_act->sa_mask = oact.sa_mask.sig[0];
9330                 old_act->sa_flags = oact.sa_flags;
9331 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9332                 old_act->sa_restorer = oact.sa_restorer;
9333 #endif
9334                 unlock_user_struct(old_act, arg3, 1);
9335             }
9336 #endif
9337         }
9338         return ret;
9339 #endif
9340     case TARGET_NR_rt_sigaction:
9341         {
9342             /*
9343              * For Alpha and SPARC this is a 5 argument syscall, with
9344              * a 'restorer' parameter which must be copied into the
9345              * sa_restorer field of the sigaction struct.
9346              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9347              * and arg5 is the sigsetsize.
9348              */
9349 #if defined(TARGET_ALPHA)
9350             target_ulong sigsetsize = arg4;
9351             target_ulong restorer = arg5;
9352 #elif defined(TARGET_SPARC)
9353             target_ulong restorer = arg4;
9354             target_ulong sigsetsize = arg5;
9355 #else
9356             target_ulong sigsetsize = arg4;
9357             target_ulong restorer = 0;
9358 #endif
9359             struct target_sigaction *act = NULL;
9360             struct target_sigaction *oact = NULL;
9361 
9362             if (sigsetsize != sizeof(target_sigset_t)) {
9363                 return -TARGET_EINVAL;
9364             }
9365             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9366                 return -TARGET_EFAULT;
9367             }
9368             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9369                 ret = -TARGET_EFAULT;
9370             } else {
9371                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9372                 if (oact) {
9373                     unlock_user_struct(oact, arg3, 1);
9374                 }
9375             }
9376             if (act) {
9377                 unlock_user_struct(act, arg2, 0);
9378             }
9379         }
9380         return ret;
9381 #ifdef TARGET_NR_sgetmask /* not on alpha */
9382     case TARGET_NR_sgetmask:
9383         {
9384             sigset_t cur_set;
9385             abi_ulong target_set;
9386             ret = do_sigprocmask(0, NULL, &cur_set);
9387             if (!ret) {
9388                 host_to_target_old_sigset(&target_set, &cur_set);
9389                 ret = target_set;
9390             }
9391         }
9392         return ret;
9393 #endif
9394 #ifdef TARGET_NR_ssetmask /* not on alpha */
9395     case TARGET_NR_ssetmask:
9396         {
9397             sigset_t set, oset;
9398             abi_ulong target_set = arg1;
9399             target_to_host_old_sigset(&set, &target_set);
9400             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9401             if (!ret) {
9402                 host_to_target_old_sigset(&target_set, &oset);
9403                 ret = target_set;
9404             }
9405         }
9406         return ret;
9407 #endif
9408 #ifdef TARGET_NR_sigprocmask
9409     case TARGET_NR_sigprocmask:
9410         {
9411 #if defined(TARGET_ALPHA)
9412             sigset_t set, oldset;
9413             abi_ulong mask;
9414             int how;
9415 
9416             switch (arg1) {
9417             case TARGET_SIG_BLOCK:
9418                 how = SIG_BLOCK;
9419                 break;
9420             case TARGET_SIG_UNBLOCK:
9421                 how = SIG_UNBLOCK;
9422                 break;
9423             case TARGET_SIG_SETMASK:
9424                 how = SIG_SETMASK;
9425                 break;
9426             default:
9427                 return -TARGET_EINVAL;
9428             }
9429             mask = arg2;
9430             target_to_host_old_sigset(&set, &mask);
9431 
9432             ret = do_sigprocmask(how, &set, &oldset);
9433             if (!is_error(ret)) {
9434                 host_to_target_old_sigset(&mask, &oldset);
9435                 ret = mask;
9436                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9437             }
9438 #else
9439             sigset_t set, oldset, *set_ptr;
9440             int how;
9441 
9442             if (arg2) {
9443                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9444                 if (!p) {
9445                     return -TARGET_EFAULT;
9446                 }
9447                 target_to_host_old_sigset(&set, p);
9448                 unlock_user(p, arg2, 0);
9449                 set_ptr = &set;
9450                 switch (arg1) {
9451                 case TARGET_SIG_BLOCK:
9452                     how = SIG_BLOCK;
9453                     break;
9454                 case TARGET_SIG_UNBLOCK:
9455                     how = SIG_UNBLOCK;
9456                     break;
9457                 case TARGET_SIG_SETMASK:
9458                     how = SIG_SETMASK;
9459                     break;
9460                 default:
9461                     return -TARGET_EINVAL;
9462                 }
9463             } else {
9464                 how = 0;
9465                 set_ptr = NULL;
9466             }
9467             ret = do_sigprocmask(how, set_ptr, &oldset);
9468             if (!is_error(ret) && arg3) {
9469                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9470                     return -TARGET_EFAULT;
9471                 host_to_target_old_sigset(p, &oldset);
9472                 unlock_user(p, arg3, sizeof(target_sigset_t));
9473             }
9474 #endif
9475         }
9476         return ret;
9477 #endif
9478     case TARGET_NR_rt_sigprocmask:
9479         {
9480             int how = arg1;
9481             sigset_t set, oldset, *set_ptr;
9482 
9483             if (arg4 != sizeof(target_sigset_t)) {
9484                 return -TARGET_EINVAL;
9485             }
9486 
9487             if (arg2) {
9488                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9489                 if (!p) {
9490                     return -TARGET_EFAULT;
9491                 }
9492                 target_to_host_sigset(&set, p);
9493                 unlock_user(p, arg2, 0);
9494                 set_ptr = &set;
9495                 switch(how) {
9496                 case TARGET_SIG_BLOCK:
9497                     how = SIG_BLOCK;
9498                     break;
9499                 case TARGET_SIG_UNBLOCK:
9500                     how = SIG_UNBLOCK;
9501                     break;
9502                 case TARGET_SIG_SETMASK:
9503                     how = SIG_SETMASK;
9504                     break;
9505                 default:
9506                     return -TARGET_EINVAL;
9507                 }
9508             } else {
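                /* With no new set the kernel ignores 'how', so pass a
                 * harmless 0 and a NULL pointer. */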
9509                 how = 0;
9510                 set_ptr = NULL;
9511             }
9512             ret = do_sigprocmask(how, set_ptr, &oldset);
9513             if (!is_error(ret) && arg3) {
9514                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9515                     return -TARGET_EFAULT;
9516                 host_to_target_sigset(p, &oldset);
9517                 unlock_user(p, arg3, sizeof(target_sigset_t));
9518             }
9519         }
9520         return ret;
9521 #ifdef TARGET_NR_sigpending
9522     case TARGET_NR_sigpending:
9523         {
9524             sigset_t set;
9525             ret = get_errno(sigpending(&set));
9526             if (!is_error(ret)) {
9527                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9528                     return -TARGET_EFAULT;
9529                 host_to_target_old_sigset(p, &set);
9530                 unlock_user(p, arg1, sizeof(target_sigset_t));
9531             }
9532         }
9533         return ret;
9534 #endif
9535     case TARGET_NR_rt_sigpending:
9536         {
9537             sigset_t set;
9538 
9539             /* Yes, this check is >, not != like most. We follow the kernel's
9540              * logic and it does it like this because it implements
9541              * NR_sigpending through the same code path, and in that case
9542              * the old_sigset_t is smaller in size.
9543              */
9544             if (arg2 > sizeof(target_sigset_t)) {
9545                 return -TARGET_EINVAL;
9546             }
9547 
9548             ret = get_errno(sigpending(&set));
9549             if (!is_error(ret)) {
9550                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9551                     return -TARGET_EFAULT;
9552                 host_to_target_sigset(p, &set);
9553                 unlock_user(p, arg1, sizeof(target_sigset_t));
9554             }
9555         }
9556         return ret;
9557 #ifdef TARGET_NR_sigsuspend
9558     case TARGET_NR_sigsuspend:
9559         {
9560             TaskState *ts = cpu->opaque;
9561 #if defined(TARGET_ALPHA)
9562             abi_ulong mask = arg1;
9563             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9564 #else
9565             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9566                 return -TARGET_EFAULT;
9567             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9568             unlock_user(p, arg1, 0);
9569 #endif
9570             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9571                                                SIGSET_T_SIZE));
9572             if (ret != -QEMU_ERESTARTSYS) {
9573                 ts->in_sigsuspend = 1;
9574             }
9575         }
9576         return ret;
9577 #endif
9578     case TARGET_NR_rt_sigsuspend:
9579         {
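            /*
             * The temporary mask is stashed in the TaskState; in_sigsuspend
             * tells the signal delivery code that a sigsuspend() mask is in
             * effect so the guest's original mask can be restored afterwards.
             */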
9580             TaskState *ts = cpu->opaque;
9581 
9582             if (arg2 != sizeof(target_sigset_t)) {
9583                 return -TARGET_EINVAL;
9584             }
9585             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9586                 return -TARGET_EFAULT;
9587             target_to_host_sigset(&ts->sigsuspend_mask, p);
9588             unlock_user(p, arg1, 0);
9589             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9590                                                SIGSET_T_SIZE));
9591             if (ret != -QEMU_ERESTARTSYS) {
9592                 ts->in_sigsuspend = 1;
9593             }
9594         }
9595         return ret;
9596 #ifdef TARGET_NR_rt_sigtimedwait
9597     case TARGET_NR_rt_sigtimedwait:
9598         {
9599             sigset_t set;
9600             struct timespec uts, *puts;
9601             siginfo_t uinfo;
9602 
9603             if (arg4 != sizeof(target_sigset_t)) {
9604                 return -TARGET_EINVAL;
9605             }
9606 
9607             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9608                 return -TARGET_EFAULT;
9609             target_to_host_sigset(&set, p);
9610             unlock_user(p, arg1, 0);
9611             if (arg3) {
9612                 puts = &uts;
9613                 if (target_to_host_timespec(puts, arg3)) {
9614                     return -TARGET_EFAULT;
9615                 }
9616             } else {
9617                 puts = NULL;
9618             }
9619             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9620                                                  SIGSET_T_SIZE));
9621             if (!is_error(ret)) {
9622                 if (arg2) {
9623                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9624                                   0);
9625                     if (!p) {
9626                         return -TARGET_EFAULT;
9627                     }
9628                     host_to_target_siginfo(p, &uinfo);
9629                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9630                 }
9631                 ret = host_to_target_signal(ret);
9632             }
9633         }
9634         return ret;
9635 #endif
9636 #ifdef TARGET_NR_rt_sigtimedwait_time64
9637     case TARGET_NR_rt_sigtimedwait_time64:
9638         {
9639             sigset_t set;
9640             struct timespec uts, *puts;
9641             siginfo_t uinfo;
9642 
9643             if (arg4 != sizeof(target_sigset_t)) {
9644                 return -TARGET_EINVAL;
9645             }
9646 
9647             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9648             if (!p) {
9649                 return -TARGET_EFAULT;
9650             }
9651             target_to_host_sigset(&set, p);
9652             unlock_user(p, arg1, 0);
9653             if (arg3) {
9654                 puts = &uts;
9655                 if (target_to_host_timespec64(puts, arg3)) {
9656                     return -TARGET_EFAULT;
9657                 }
9658             } else {
9659                 puts = NULL;
9660             }
9661             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9662                                                  SIGSET_T_SIZE));
9663             if (!is_error(ret)) {
9664                 if (arg2) {
9665                     p = lock_user(VERIFY_WRITE, arg2,
9666                                   sizeof(target_siginfo_t), 0);
9667                     if (!p) {
9668                         return -TARGET_EFAULT;
9669                     }
9670                     host_to_target_siginfo(p, &uinfo);
9671                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9672                 }
9673                 ret = host_to_target_signal(ret);
9674             }
9675         }
9676         return ret;
9677 #endif
9678     case TARGET_NR_rt_sigqueueinfo:
9679         {
9680             siginfo_t uinfo;
9681 
9682             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9683             if (!p) {
9684                 return -TARGET_EFAULT;
9685             }
9686             target_to_host_siginfo(&uinfo, p);
9687             unlock_user(p, arg3, 0);
9688             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9689         }
9690         return ret;
9691     case TARGET_NR_rt_tgsigqueueinfo:
9692         {
9693             siginfo_t uinfo;
9694 
9695             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9696             if (!p) {
9697                 return -TARGET_EFAULT;
9698             }
9699             target_to_host_siginfo(&uinfo, p);
9700             unlock_user(p, arg4, 0);
9701             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9702         }
9703         return ret;
9704 #ifdef TARGET_NR_sigreturn
9705     case TARGET_NR_sigreturn:
9706         if (block_signals()) {
9707             return -QEMU_ERESTARTSYS;
9708         }
9709         return do_sigreturn(cpu_env);
9710 #endif
9711     case TARGET_NR_rt_sigreturn:
9712         if (block_signals()) {
9713             return -QEMU_ERESTARTSYS;
9714         }
9715         return do_rt_sigreturn(cpu_env);
9716     case TARGET_NR_sethostname:
9717         if (!(p = lock_user_string(arg1)))
9718             return -TARGET_EFAULT;
9719         ret = get_errno(sethostname(p, arg2));
9720         unlock_user(p, arg1, 0);
9721         return ret;
9722 #ifdef TARGET_NR_setrlimit
9723     case TARGET_NR_setrlimit:
9724         {
9725             int resource = target_to_host_resource(arg1);
9726             struct target_rlimit *target_rlim;
9727             struct rlimit rlim;
9728             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9729                 return -TARGET_EFAULT;
9730             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9731             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9732             unlock_user_struct(target_rlim, arg2, 0);
9733             /*
9734              * If we just passed through resource limit settings for memory then
9735              * they would also apply to QEMU's own allocations, and QEMU will
9736              * crash or hang or die if its allocations fail. Ideally we would
9737              * track the guest allocations in QEMU and apply the limits ourselves.
9738              * For now, just tell the guest the call succeeded but don't actually
9739              * limit anything.
9740              */
9741             if (resource != RLIMIT_AS &&
9742                 resource != RLIMIT_DATA &&
9743                 resource != RLIMIT_STACK) {
9744                 return get_errno(setrlimit(resource, &rlim));
9745             } else {
9746                 return 0;
9747             }
9748         }
9749 #endif
9750 #ifdef TARGET_NR_getrlimit
9751     case TARGET_NR_getrlimit:
9752         {
9753             int resource = target_to_host_resource(arg1);
9754             struct target_rlimit *target_rlim;
9755             struct rlimit rlim;
9756 
9757             ret = get_errno(getrlimit(resource, &rlim));
9758             if (!is_error(ret)) {
9759                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9760                     return -TARGET_EFAULT;
9761                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9762                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9763                 unlock_user_struct(target_rlim, arg2, 1);
9764             }
9765         }
9766         return ret;
9767 #endif
9768     case TARGET_NR_getrusage:
9769         {
9770             struct rusage rusage;
9771             ret = get_errno(getrusage(arg1, &rusage));
9772             if (!is_error(ret)) {
9773                 ret = host_to_target_rusage(arg2, &rusage);
9774             }
9775         }
9776         return ret;
9777 #if defined(TARGET_NR_gettimeofday)
9778     case TARGET_NR_gettimeofday:
9779         {
9780             struct timeval tv;
9781             struct timezone tz;
9782 
9783             ret = get_errno(gettimeofday(&tv, &tz));
9784             if (!is_error(ret)) {
9785                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9786                     return -TARGET_EFAULT;
9787                 }
9788                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9789                     return -TARGET_EFAULT;
9790                 }
9791             }
9792         }
9793         return ret;
9794 #endif
9795 #if defined(TARGET_NR_settimeofday)
9796     case TARGET_NR_settimeofday:
9797         {
9798             struct timeval tv, *ptv = NULL;
9799             struct timezone tz, *ptz = NULL;
9800 
9801             if (arg1) {
9802                 if (copy_from_user_timeval(&tv, arg1)) {
9803                     return -TARGET_EFAULT;
9804                 }
9805                 ptv = &tv;
9806             }
9807 
9808             if (arg2) {
9809                 if (copy_from_user_timezone(&tz, arg2)) {
9810                     return -TARGET_EFAULT;
9811                 }
9812                 ptz = &tz;
9813             }
9814 
9815             return get_errno(settimeofday(ptv, ptz));
9816         }
9817 #endif
9818 #if defined(TARGET_NR_select)
9819     case TARGET_NR_select:
9820 #if defined(TARGET_WANT_NI_OLD_SELECT)
9821         /* some architectures used to have old_select here
9822          * but now return -TARGET_ENOSYS for it.
9823          */
9824         ret = -TARGET_ENOSYS;
9825 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9826         ret = do_old_select(arg1);
9827 #else
9828         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9829 #endif
9830         return ret;
9831 #endif
9832 #ifdef TARGET_NR_pselect6
9833     case TARGET_NR_pselect6:
9834         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9835 #endif
9836 #ifdef TARGET_NR_pselect6_time64
9837     case TARGET_NR_pselect6_time64:
9838         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9839 #endif
9840 #ifdef TARGET_NR_symlink
9841     case TARGET_NR_symlink:
9842         {
9843             void *p2;
9844             p = lock_user_string(arg1);
9845             p2 = lock_user_string(arg2);
9846             if (!p || !p2)
9847                 ret = -TARGET_EFAULT;
9848             else
9849                 ret = get_errno(symlink(p, p2));
9850             unlock_user(p2, arg2, 0);
9851             unlock_user(p, arg1, 0);
9852         }
9853         return ret;
9854 #endif
9855 #if defined(TARGET_NR_symlinkat)
9856     case TARGET_NR_symlinkat:
9857         {
9858             void *p2;
9859             p  = lock_user_string(arg1);
9860             p2 = lock_user_string(arg3);
9861             if (!p || !p2)
9862                 ret = -TARGET_EFAULT;
9863             else
9864                 ret = get_errno(symlinkat(p, arg2, p2));
9865             unlock_user(p2, arg3, 0);
9866             unlock_user(p, arg1, 0);
9867         }
9868         return ret;
9869 #endif
9870 #ifdef TARGET_NR_readlink
9871     case TARGET_NR_readlink:
9872         {
9873             void *p2;
9874             p = lock_user_string(arg1);
9875             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9876             if (!p || !p2) {
9877                 ret = -TARGET_EFAULT;
9878             } else if (!arg3) {
9879                 /* Short circuit this for the magic exe check. */
9880                 ret = -TARGET_EINVAL;
9881             } else if (is_proc_myself((const char *)p, "exe")) {
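                /*
                 * readlink("/proc/self/exe") must report the emulated
                 * binary (exec_path), not the QEMU executable that is
                 * actually running.
                 */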
9882                 char real[PATH_MAX], *temp;
9883                 temp = realpath(exec_path, real);
9884                 /* Return value is # of bytes that we wrote to the buffer. */
9885                 if (temp == NULL) {
9886                     ret = get_errno(-1);
9887                 } else {
9888                     /* Don't worry about sign mismatch as earlier mapping
9889                      * logic would have thrown a bad address error. */
9890                     ret = MIN(strlen(real), arg3);
9891                     /* We cannot NUL terminate the string. */
9892                     memcpy(p2, real, ret);
9893                 }
9894             } else {
9895                 ret = get_errno(readlink(path(p), p2, arg3));
9896             }
9897             unlock_user(p2, arg2, ret);
9898             unlock_user(p, arg1, 0);
9899         }
9900         return ret;
9901 #endif
9902 #if defined(TARGET_NR_readlinkat)
9903     case TARGET_NR_readlinkat:
9904         {
9905             void *p2;
9906             p  = lock_user_string(arg2);
9907             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9908             if (!p || !p2) {
9909                 ret = -TARGET_EFAULT;
9910             } else if (is_proc_myself((const char *)p, "exe")) {
9911                 char real[PATH_MAX], *temp;
9912                 temp = realpath(exec_path, real);
9913             ret = temp == NULL ? get_errno(-1) : strlen(real);
9914                 snprintf((char *)p2, arg4, "%s", real);
9915             } else {
9916                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9917             }
9918             unlock_user(p2, arg3, ret);
9919             unlock_user(p, arg2, 0);
9920         }
9921         return ret;
9922 #endif
9923 #ifdef TARGET_NR_swapon
9924     case TARGET_NR_swapon:
9925         if (!(p = lock_user_string(arg1)))
9926             return -TARGET_EFAULT;
9927         ret = get_errno(swapon(p, arg2));
9928         unlock_user(p, arg1, 0);
9929         return ret;
9930 #endif
9931     case TARGET_NR_reboot:
9932         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9933            /* only RESTART2 uses arg4; it must be ignored in all other cases */
9934            p = lock_user_string(arg4);
9935            if (!p) {
9936                return -TARGET_EFAULT;
9937            }
9938            ret = get_errno(reboot(arg1, arg2, arg3, p));
9939            unlock_user(p, arg4, 0);
9940         } else {
9941            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9942         }
9943         return ret;
9944 #ifdef TARGET_NR_mmap
9945     case TARGET_NR_mmap:
9946 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9947     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9948     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9949     || defined(TARGET_S390X)
9950         {
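            /*
             * The old-style mmap on these targets takes a single pointer to
             * a block of six abi_ulong arguments in guest memory, so unpack
             * (and byte-swap) them before calling target_mmap().
             */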
9951             abi_ulong *v;
9952             abi_ulong v1, v2, v3, v4, v5, v6;
9953             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9954                 return -TARGET_EFAULT;
9955             v1 = tswapal(v[0]);
9956             v2 = tswapal(v[1]);
9957             v3 = tswapal(v[2]);
9958             v4 = tswapal(v[3]);
9959             v5 = tswapal(v[4]);
9960             v6 = tswapal(v[5]);
9961             unlock_user(v, arg1, 0);
9962             ret = get_errno(target_mmap(v1, v2, v3,
9963                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9964                                         v5, v6));
9965         }
9966 #else
9967         /* mmap pointers are always untagged */
9968         ret = get_errno(target_mmap(arg1, arg2, arg3,
9969                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9970                                     arg5,
9971                                     arg6));
9972 #endif
9973         return ret;
9974 #endif
9975 #ifdef TARGET_NR_mmap2
9976     case TARGET_NR_mmap2:
9977 #ifndef MMAP_SHIFT
9978 #define MMAP_SHIFT 12
9979 #endif
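        /* mmap2 passes the file offset in (1 << MMAP_SHIFT)-byte units. */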
9980         ret = target_mmap(arg1, arg2, arg3,
9981                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9982                           arg5, arg6 << MMAP_SHIFT);
9983         return get_errno(ret);
9984 #endif
9985     case TARGET_NR_munmap:
9986         arg1 = cpu_untagged_addr(cpu, arg1);
9987         return get_errno(target_munmap(arg1, arg2));
9988     case TARGET_NR_mprotect:
9989         arg1 = cpu_untagged_addr(cpu, arg1);
9990         {
9991             TaskState *ts = cpu->opaque;
9992             /* Special hack to detect libc making the stack executable.  */
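            /*
             * PROT_GROWSDOWN asks the kernel to apply the change down to the
             * bottom of the stack mapping; emulate that by widening the
             * range to ts->info->stack_limit and dropping the flag.
             */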
9993             if ((arg3 & PROT_GROWSDOWN)
9994                 && arg1 >= ts->info->stack_limit
9995                 && arg1 <= ts->info->start_stack) {
9996                 arg3 &= ~PROT_GROWSDOWN;
9997                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9998                 arg1 = ts->info->stack_limit;
9999             }
10000         }
10001         return get_errno(target_mprotect(arg1, arg2, arg3));
10002 #ifdef TARGET_NR_mremap
10003     case TARGET_NR_mremap:
10004         arg1 = cpu_untagged_addr(cpu, arg1);
10005         /* mremap new_addr (arg5) is always untagged */
10006         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10007 #endif
10008         /* ??? msync/mlock/munlock are broken for softmmu.  */
10009 #ifdef TARGET_NR_msync
10010     case TARGET_NR_msync:
10011         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10012 #endif
10013 #ifdef TARGET_NR_mlock
10014     case TARGET_NR_mlock:
10015         return get_errno(mlock(g2h(cpu, arg1), arg2));
10016 #endif
10017 #ifdef TARGET_NR_munlock
10018     case TARGET_NR_munlock:
10019         return get_errno(munlock(g2h(cpu, arg1), arg2));
10020 #endif
10021 #ifdef TARGET_NR_mlockall
10022     case TARGET_NR_mlockall:
10023         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10024 #endif
10025 #ifdef TARGET_NR_munlockall
10026     case TARGET_NR_munlockall:
10027         return get_errno(munlockall());
10028 #endif
10029 #ifdef TARGET_NR_truncate
10030     case TARGET_NR_truncate:
10031         if (!(p = lock_user_string(arg1)))
10032             return -TARGET_EFAULT;
10033         ret = get_errno(truncate(p, arg2));
10034         unlock_user(p, arg1, 0);
10035         return ret;
10036 #endif
10037 #ifdef TARGET_NR_ftruncate
10038     case TARGET_NR_ftruncate:
10039         return get_errno(ftruncate(arg1, arg2));
10040 #endif
10041     case TARGET_NR_fchmod:
10042         return get_errno(fchmod(arg1, arg2));
10043 #if defined(TARGET_NR_fchmodat)
10044     case TARGET_NR_fchmodat:
10045         if (!(p = lock_user_string(arg2)))
10046             return -TARGET_EFAULT;
10047         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10048         unlock_user(p, arg2, 0);
10049         return ret;
10050 #endif
10051     case TARGET_NR_getpriority:
10052         /* Note that negative values are valid for getpriority, so we must
10053            differentiate based on errno settings.  */
10054         errno = 0;
10055         ret = getpriority(arg1, arg2);
10056         if (ret == -1 && errno != 0) {
10057             return -host_to_target_errno(errno);
10058         }
10059 #ifdef TARGET_ALPHA
10060         /* Return value is the unbiased priority.  Signal no error.  */
10061         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10062 #else
10063         /* Return value is a biased priority to avoid negative numbers.  */
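        /* (The host libc getpriority() already strips the kernel's +20 bias,
         *  so add it back to match the raw syscall convention.) */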
10064         ret = 20 - ret;
10065 #endif
10066         return ret;
10067     case TARGET_NR_setpriority:
10068         return get_errno(setpriority(arg1, arg2, arg3));
10069 #ifdef TARGET_NR_statfs
10070     case TARGET_NR_statfs:
10071         if (!(p = lock_user_string(arg1))) {
10072             return -TARGET_EFAULT;
10073         }
10074         ret = get_errno(statfs(path(p), &stfs));
10075         unlock_user(p, arg1, 0);
10076     convert_statfs:
10077         if (!is_error(ret)) {
10078             struct target_statfs *target_stfs;
10079 
10080             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10081                 return -TARGET_EFAULT;
10082             __put_user(stfs.f_type, &target_stfs->f_type);
10083             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10084             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10085             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10086             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10087             __put_user(stfs.f_files, &target_stfs->f_files);
10088             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10089             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10090             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10091             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10092             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10093 #ifdef _STATFS_F_FLAGS
10094             __put_user(stfs.f_flags, &target_stfs->f_flags);
10095 #else
10096             __put_user(0, &target_stfs->f_flags);
10097 #endif
10098             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10099             unlock_user_struct(target_stfs, arg2, 1);
10100         }
10101         return ret;
10102 #endif
10103 #ifdef TARGET_NR_fstatfs
10104     case TARGET_NR_fstatfs:
10105         ret = get_errno(fstatfs(arg1, &stfs));
10106         goto convert_statfs;
10107 #endif
10108 #ifdef TARGET_NR_statfs64
10109     case TARGET_NR_statfs64:
10110         if (!(p = lock_user_string(arg1))) {
10111             return -TARGET_EFAULT;
10112         }
10113         ret = get_errno(statfs(path(p), &stfs));
10114         unlock_user(p, arg1, 0);
10115     convert_statfs64:
10116         if (!is_error(ret)) {
10117             struct target_statfs64 *target_stfs;
10118 
10119             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10120                 return -TARGET_EFAULT;
10121             __put_user(stfs.f_type, &target_stfs->f_type);
10122             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10123             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10124             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10125             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10126             __put_user(stfs.f_files, &target_stfs->f_files);
10127             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10128             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10129             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10130             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10131             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10132 #ifdef _STATFS_F_FLAGS
10133             __put_user(stfs.f_flags, &target_stfs->f_flags);
10134 #else
10135             __put_user(0, &target_stfs->f_flags);
10136 #endif
10137             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10138             unlock_user_struct(target_stfs, arg3, 1);
10139         }
10140         return ret;
10141     case TARGET_NR_fstatfs64:
10142         ret = get_errno(fstatfs(arg1, &stfs));
10143         goto convert_statfs64;
10144 #endif
10145 #ifdef TARGET_NR_socketcall
10146     case TARGET_NR_socketcall:
10147         return do_socketcall(arg1, arg2);
10148 #endif
10149 #ifdef TARGET_NR_accept
10150     case TARGET_NR_accept:
10151         return do_accept4(arg1, arg2, arg3, 0);
10152 #endif
10153 #ifdef TARGET_NR_accept4
10154     case TARGET_NR_accept4:
10155         return do_accept4(arg1, arg2, arg3, arg4);
10156 #endif
10157 #ifdef TARGET_NR_bind
10158     case TARGET_NR_bind:
10159         return do_bind(arg1, arg2, arg3);
10160 #endif
10161 #ifdef TARGET_NR_connect
10162     case TARGET_NR_connect:
10163         return do_connect(arg1, arg2, arg3);
10164 #endif
10165 #ifdef TARGET_NR_getpeername
10166     case TARGET_NR_getpeername:
10167         return do_getpeername(arg1, arg2, arg3);
10168 #endif
10169 #ifdef TARGET_NR_getsockname
10170     case TARGET_NR_getsockname:
10171         return do_getsockname(arg1, arg2, arg3);
10172 #endif
10173 #ifdef TARGET_NR_getsockopt
10174     case TARGET_NR_getsockopt:
10175         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10176 #endif
10177 #ifdef TARGET_NR_listen
10178     case TARGET_NR_listen:
10179         return get_errno(listen(arg1, arg2));
10180 #endif
10181 #ifdef TARGET_NR_recv
10182     case TARGET_NR_recv:
10183         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10184 #endif
10185 #ifdef TARGET_NR_recvfrom
10186     case TARGET_NR_recvfrom:
10187         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10188 #endif
10189 #ifdef TARGET_NR_recvmsg
10190     case TARGET_NR_recvmsg:
10191         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10192 #endif
10193 #ifdef TARGET_NR_send
10194     case TARGET_NR_send:
10195         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10196 #endif
10197 #ifdef TARGET_NR_sendmsg
10198     case TARGET_NR_sendmsg:
10199         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10200 #endif
10201 #ifdef TARGET_NR_sendmmsg
10202     case TARGET_NR_sendmmsg:
10203         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10204 #endif
10205 #ifdef TARGET_NR_recvmmsg
10206     case TARGET_NR_recvmmsg:
10207         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10208 #endif
10209 #ifdef TARGET_NR_sendto
10210     case TARGET_NR_sendto:
10211         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10212 #endif
10213 #ifdef TARGET_NR_shutdown
10214     case TARGET_NR_shutdown:
10215         return get_errno(shutdown(arg1, arg2));
10216 #endif
10217 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10218     case TARGET_NR_getrandom:
10219         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10220         if (!p) {
10221             return -TARGET_EFAULT;
10222         }
10223         ret = get_errno(getrandom(p, arg2, arg3));
10224         unlock_user(p, arg1, ret);
10225         return ret;
10226 #endif
10227 #ifdef TARGET_NR_socket
10228     case TARGET_NR_socket:
10229         return do_socket(arg1, arg2, arg3);
10230 #endif
10231 #ifdef TARGET_NR_socketpair
10232     case TARGET_NR_socketpair:
10233         return do_socketpair(arg1, arg2, arg3, arg4);
10234 #endif
10235 #ifdef TARGET_NR_setsockopt
10236     case TARGET_NR_setsockopt:
10237         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10238 #endif
10239 #if defined(TARGET_NR_syslog)
10240     case TARGET_NR_syslog:
10241         {
10242             int len = arg3;
10243 
10244             switch (arg1) {
10245             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10246             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10247             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10248             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10249             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10250             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10251             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10252             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10253                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10254             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10255             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10256             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10257                 {
10258                     if (len < 0) {
10259                         return -TARGET_EINVAL;
10260                     }
10261                     if (len == 0) {
10262                         return 0;
10263                     }
10264                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10265                     if (!p) {
10266                         return -TARGET_EFAULT;
10267                     }
10268                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10269                     unlock_user(p, arg2, arg3);
10270                 }
10271                 return ret;
10272             default:
10273                 return -TARGET_EINVAL;
10274             }
10275         }
10276         break;
10277 #endif
10278     case TARGET_NR_setitimer:
10279         {
10280             struct itimerval value, ovalue, *pvalue;
10281 
10282             if (arg2) {
10283                 pvalue = &value;
10284                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10285                     || copy_from_user_timeval(&pvalue->it_value,
10286                                               arg2 + sizeof(struct target_timeval)))
10287                     return -TARGET_EFAULT;
10288             } else {
10289                 pvalue = NULL;
10290             }
10291             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10292             if (!is_error(ret) && arg3) {
10293                 if (copy_to_user_timeval(arg3,
10294                                          &ovalue.it_interval)
10295                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10296                                             &ovalue.it_value))
10297                     return -TARGET_EFAULT;
10298             }
10299         }
10300         return ret;
10301     case TARGET_NR_getitimer:
10302         {
10303             struct itimerval value;
10304 
10305             ret = get_errno(getitimer(arg1, &value));
10306             if (!is_error(ret) && arg2) {
10307                 if (copy_to_user_timeval(arg2,
10308                                          &value.it_interval)
10309                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10310                                             &value.it_value))
10311                     return -TARGET_EFAULT;
10312             }
10313         }
10314         return ret;
10315 #ifdef TARGET_NR_stat
10316     case TARGET_NR_stat:
10317         if (!(p = lock_user_string(arg1))) {
10318             return -TARGET_EFAULT;
10319         }
10320         ret = get_errno(stat(path(p), &st));
10321         unlock_user(p, arg1, 0);
10322         goto do_stat;
10323 #endif
10324 #ifdef TARGET_NR_lstat
10325     case TARGET_NR_lstat:
10326         if (!(p = lock_user_string(arg1))) {
10327             return -TARGET_EFAULT;
10328         }
10329         ret = get_errno(lstat(path(p), &st));
10330         unlock_user(p, arg1, 0);
10331         goto do_stat;
10332 #endif
10333 #ifdef TARGET_NR_fstat
10334     case TARGET_NR_fstat:
10335         {
10336             ret = get_errno(fstat(arg1, &st));
10337 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10338         do_stat:
10339 #endif
10340             if (!is_error(ret)) {
10341                 struct target_stat *target_st;
10342 
10343                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10344                     return -TARGET_EFAULT;
10345                 memset(target_st, 0, sizeof(*target_st));
10346                 __put_user(st.st_dev, &target_st->st_dev);
10347                 __put_user(st.st_ino, &target_st->st_ino);
10348                 __put_user(st.st_mode, &target_st->st_mode);
10349                 __put_user(st.st_uid, &target_st->st_uid);
10350                 __put_user(st.st_gid, &target_st->st_gid);
10351                 __put_user(st.st_nlink, &target_st->st_nlink);
10352                 __put_user(st.st_rdev, &target_st->st_rdev);
10353                 __put_user(st.st_size, &target_st->st_size);
10354                 __put_user(st.st_blksize, &target_st->st_blksize);
10355                 __put_user(st.st_blocks, &target_st->st_blocks);
10356                 __put_user(st.st_atime, &target_st->target_st_atime);
10357                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10358                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10359 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10360                 __put_user(st.st_atim.tv_nsec,
10361                            &target_st->target_st_atime_nsec);
10362                 __put_user(st.st_mtim.tv_nsec,
10363                            &target_st->target_st_mtime_nsec);
10364                 __put_user(st.st_ctim.tv_nsec,
10365                            &target_st->target_st_ctime_nsec);
10366 #endif
10367                 unlock_user_struct(target_st, arg2, 1);
10368             }
10369         }
10370         return ret;
10371 #endif
10372     case TARGET_NR_vhangup:
10373         return get_errno(vhangup());
10374 #ifdef TARGET_NR_syscall
10375     case TARGET_NR_syscall:
10376         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10377                           arg6, arg7, arg8, 0);
10378 #endif
10379 #if defined(TARGET_NR_wait4)
10380     case TARGET_NR_wait4:
10381         {
10382             int status;
10383             abi_long status_ptr = arg2;
10384             struct rusage rusage, *rusage_ptr;
10385             abi_ulong target_rusage = arg4;
10386             abi_long rusage_err;
10387             if (target_rusage)
10388                 rusage_ptr = &rusage;
10389             else
10390                 rusage_ptr = NULL;
10391             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10392             if (!is_error(ret)) {
10393                 if (status_ptr && ret) {
10394                     status = host_to_target_waitstatus(status);
10395                     if (put_user_s32(status, status_ptr))
10396                         return -TARGET_EFAULT;
10397                 }
10398                 if (target_rusage) {
10399                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10400                     if (rusage_err) {
10401                         ret = rusage_err;
10402                     }
10403                 }
10404             }
10405         }
10406         return ret;
10407 #endif
10408 #ifdef TARGET_NR_swapoff
10409     case TARGET_NR_swapoff:
10410         if (!(p = lock_user_string(arg1)))
10411             return -TARGET_EFAULT;
10412         ret = get_errno(swapoff(p));
10413         unlock_user(p, arg1, 0);
10414         return ret;
10415 #endif
10416     case TARGET_NR_sysinfo:
10417         {
10418             struct target_sysinfo *target_value;
10419             struct sysinfo value;
10420             ret = get_errno(sysinfo(&value));
10421             if (!is_error(ret) && arg1)
10422             {
10423                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10424                     return -TARGET_EFAULT;
10425                 __put_user(value.uptime, &target_value->uptime);
10426                 __put_user(value.loads[0], &target_value->loads[0]);
10427                 __put_user(value.loads[1], &target_value->loads[1]);
10428                 __put_user(value.loads[2], &target_value->loads[2]);
10429                 __put_user(value.totalram, &target_value->totalram);
10430                 __put_user(value.freeram, &target_value->freeram);
10431                 __put_user(value.sharedram, &target_value->sharedram);
10432                 __put_user(value.bufferram, &target_value->bufferram);
10433                 __put_user(value.totalswap, &target_value->totalswap);
10434                 __put_user(value.freeswap, &target_value->freeswap);
10435                 __put_user(value.procs, &target_value->procs);
10436                 __put_user(value.totalhigh, &target_value->totalhigh);
10437                 __put_user(value.freehigh, &target_value->freehigh);
10438                 __put_user(value.mem_unit, &target_value->mem_unit);
10439                 unlock_user_struct(target_value, arg1, 1);
10440             }
10441         }
10442         return ret;
10443 #ifdef TARGET_NR_ipc
10444     case TARGET_NR_ipc:
10445         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10446 #endif
10447 #ifdef TARGET_NR_semget
10448     case TARGET_NR_semget:
10449         return get_errno(semget(arg1, arg2, arg3));
10450 #endif
10451 #ifdef TARGET_NR_semop
10452     case TARGET_NR_semop:
10453         return do_semtimedop(arg1, arg2, arg3, 0, false);
10454 #endif
10455 #ifdef TARGET_NR_semtimedop
10456     case TARGET_NR_semtimedop:
10457         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10458 #endif
10459 #ifdef TARGET_NR_semtimedop_time64
10460     case TARGET_NR_semtimedop_time64:
10461         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10462 #endif
10463 #ifdef TARGET_NR_semctl
10464     case TARGET_NR_semctl:
10465         return do_semctl(arg1, arg2, arg3, arg4);
10466 #endif
10467 #ifdef TARGET_NR_msgctl
10468     case TARGET_NR_msgctl:
10469         return do_msgctl(arg1, arg2, arg3);
10470 #endif
10471 #ifdef TARGET_NR_msgget
10472     case TARGET_NR_msgget:
10473         return get_errno(msgget(arg1, arg2));
10474 #endif
10475 #ifdef TARGET_NR_msgrcv
10476     case TARGET_NR_msgrcv:
10477         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10478 #endif
10479 #ifdef TARGET_NR_msgsnd
10480     case TARGET_NR_msgsnd:
10481         return do_msgsnd(arg1, arg2, arg3, arg4);
10482 #endif
10483 #ifdef TARGET_NR_shmget
10484     case TARGET_NR_shmget:
10485         return get_errno(shmget(arg1, arg2, arg3));
10486 #endif
10487 #ifdef TARGET_NR_shmctl
10488     case TARGET_NR_shmctl:
10489         return do_shmctl(arg1, arg2, arg3);
10490 #endif
10491 #ifdef TARGET_NR_shmat
10492     case TARGET_NR_shmat:
10493         return do_shmat(cpu_env, arg1, arg2, arg3);
10494 #endif
10495 #ifdef TARGET_NR_shmdt
10496     case TARGET_NR_shmdt:
10497         return do_shmdt(arg1);
10498 #endif
10499     case TARGET_NR_fsync:
10500         return get_errno(fsync(arg1));
10501     case TARGET_NR_clone:
10502         /* Linux manages to have three different orderings for its
10503          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10504          * match the kernel's CONFIG_CLONE_* settings.
10505          * Microblaze is further special in that it uses a sixth
10506          * implicit argument to clone for the TLS pointer.
10507          */
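        /*
         * do_fork(), defined earlier in this file, expects (env, flags,
         * newsp, parent_tidptr, tls, child_tidptr); the #if ladder below
         * only reorders the guest's arguments into that shape.
         */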
10508 #if defined(TARGET_MICROBLAZE)
10509         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10510 #elif defined(TARGET_CLONE_BACKWARDS)
10511         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10512 #elif defined(TARGET_CLONE_BACKWARDS2)
10513         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10514 #else
10515         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10516 #endif
10517         return ret;
10518 #ifdef __NR_exit_group
10519         /* new thread calls */
10520     case TARGET_NR_exit_group:
10521         preexit_cleanup(cpu_env, arg1);
10522         return get_errno(exit_group(arg1));
10523 #endif
10524     case TARGET_NR_setdomainname:
10525         if (!(p = lock_user_string(arg1)))
10526             return -TARGET_EFAULT;
10527         ret = get_errno(setdomainname(p, arg2));
10528         unlock_user(p, arg1, 0);
10529         return ret;
10530     case TARGET_NR_uname:
10531         /* no need to transcode because we use the linux syscall */
10532         {
10533             struct new_utsname * buf;
10534 
10535             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10536                 return -TARGET_EFAULT;
10537             ret = get_errno(sys_uname(buf));
10538             if (!is_error(ret)) {
10539                 /* Overwrite the native machine name with whatever is being
10540                    emulated. */
10541                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10542                           sizeof(buf->machine));
10543                 /* Allow the user to override the reported release.  */
10544                 if (qemu_uname_release && *qemu_uname_release) {
10545                     g_strlcpy(buf->release, qemu_uname_release,
10546                               sizeof(buf->release));
10547                 }
10548             }
10549             unlock_user_struct(buf, arg1, 1);
10550         }
10551         return ret;
10552 #ifdef TARGET_I386
10553     case TARGET_NR_modify_ldt:
10554         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10555 #if !defined(TARGET_X86_64)
10556     case TARGET_NR_vm86:
10557         return do_vm86(cpu_env, arg1, arg2);
10558 #endif
10559 #endif
10560 #if defined(TARGET_NR_adjtimex)
10561     case TARGET_NR_adjtimex:
10562         {
10563             struct timex host_buf;
10564 
10565             if (target_to_host_timex(&host_buf, arg1) != 0) {
10566                 return -TARGET_EFAULT;
10567             }
10568             ret = get_errno(adjtimex(&host_buf));
10569             if (!is_error(ret)) {
10570                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10571                     return -TARGET_EFAULT;
10572                 }
10573             }
10574         }
10575         return ret;
10576 #endif
10577 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10578     case TARGET_NR_clock_adjtime:
10579         {
10580             struct timex htx, *phtx = &htx;
10581 
10582             if (target_to_host_timex(phtx, arg2) != 0) {
10583                 return -TARGET_EFAULT;
10584             }
10585             ret = get_errno(clock_adjtime(arg1, phtx));
10586             if (!is_error(ret) && phtx) {
10587                 if (host_to_target_timex(arg2, phtx) != 0) {
10588                     return -TARGET_EFAULT;
10589                 }
10590             }
10591         }
10592         return ret;
10593 #endif
10594 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10595     case TARGET_NR_clock_adjtime64:
10596         {
10597             struct timex htx;
10598 
10599             if (target_to_host_timex64(&htx, arg2) != 0) {
10600                 return -TARGET_EFAULT;
10601             }
10602             ret = get_errno(clock_adjtime(arg1, &htx));
10603             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10604                 return -TARGET_EFAULT;
10605             }
10606         }
10607         return ret;
10608 #endif
10609     case TARGET_NR_getpgid:
10610         return get_errno(getpgid(arg1));
10611     case TARGET_NR_fchdir:
10612         return get_errno(fchdir(arg1));
10613     case TARGET_NR_personality:
10614         return get_errno(personality(arg1));
10615 #ifdef TARGET_NR__llseek /* Not on alpha */
10616     case TARGET_NR__llseek:
10617         {
10618             int64_t res;
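            /*
             * Hosts that lack an _llseek syscall (64-bit hosts) emulate it
             * with lseek() on the combined 64-bit offset; otherwise the two
             * 32-bit halves are passed straight through.
             */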
10619 #if !defined(__NR_llseek)
10620             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10621             if (res == -1) {
10622                 ret = get_errno(res);
10623             } else {
10624                 ret = 0;
10625             }
10626 #else
10627             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10628 #endif
10629             if ((ret == 0) && put_user_s64(res, arg4)) {
10630                 return -TARGET_EFAULT;
10631             }
10632         }
10633         return ret;
10634 #endif
10635 #ifdef TARGET_NR_getdents
10636     case TARGET_NR_getdents:
10637         return do_getdents(arg1, arg2, arg3);
10638 #endif /* TARGET_NR_getdents */
10639 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10640     case TARGET_NR_getdents64:
10641         return do_getdents64(arg1, arg2, arg3);
10642 #endif /* TARGET_NR_getdents64 */
10643 #if defined(TARGET_NR__newselect)
10644     case TARGET_NR__newselect:
10645         return do_select(arg1, arg2, arg3, arg4, arg5);
10646 #endif
10647 #ifdef TARGET_NR_poll
10648     case TARGET_NR_poll:
10649         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10650 #endif
10651 #ifdef TARGET_NR_ppoll
10652     case TARGET_NR_ppoll:
10653         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10654 #endif
10655 #ifdef TARGET_NR_ppoll_time64
10656     case TARGET_NR_ppoll_time64:
10657         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10658 #endif
10659     case TARGET_NR_flock:
10660         /* NOTE: the flock constant seems to be the same for every
10661            Linux platform */
10662         return get_errno(safe_flock(arg1, arg2));
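          /* For readv/writev the guest iovec is converted into host memory
           * by lock_iovec(); the last unlock_iovec() argument says whether
           * the buffers must be copied back to the guest (1 for reads). */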
10663     case TARGET_NR_readv:
10664         {
10665             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10666             if (vec != NULL) {
10667                 ret = get_errno(safe_readv(arg1, vec, arg3));
10668                 unlock_iovec(vec, arg2, arg3, 1);
10669             } else {
10670                 ret = -host_to_target_errno(errno);
10671             }
10672         }
10673         return ret;
10674     case TARGET_NR_writev:
10675         {
10676             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10677             if (vec != NULL) {
10678                 ret = get_errno(safe_writev(arg1, vec, arg3));
10679                 unlock_iovec(vec, arg2, arg3, 0);
10680             } else {
10681                 ret = -host_to_target_errno(errno);
10682             }
10683         }
10684         return ret;
10685 #if defined(TARGET_NR_preadv)
10686     case TARGET_NR_preadv:
10687         {
10688             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10689             if (vec != NULL) {
10690                 unsigned long low, high;
10691 
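                      /* Reassemble the 64-bit offset from its two registers
                       * and split it into the low/high words that
                       * safe_preadv() expects. */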
10692                 target_to_host_low_high(arg4, arg5, &low, &high);
10693                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10694                 unlock_iovec(vec, arg2, arg3, 1);
10695             } else {
10696                 ret = -host_to_target_errno(errno);
10697             }
10698         }
10699         return ret;
10700 #endif
10701 #if defined(TARGET_NR_pwritev)
10702     case TARGET_NR_pwritev:
10703         {
10704             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10705             if (vec != NULL) {
10706                 unsigned long low, high;
10707 
10708                 target_to_host_low_high(arg4, arg5, &low, &high);
10709                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10710                 unlock_iovec(vec, arg2, arg3, 0);
10711             } else {
10712                 ret = -host_to_target_errno(errno);
10713             }
10714         }
10715         return ret;
10716 #endif
10717     case TARGET_NR_getsid:
10718         return get_errno(getsid(arg1));
10719 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10720     case TARGET_NR_fdatasync:
10721         return get_errno(fdatasync(arg1));
10722 #endif
10723     case TARGET_NR_sched_getaffinity:
10724         {
10725             unsigned int mask_size;
10726             unsigned long *mask;
10727 
10728             /*
10729              * sched_getaffinity needs multiples of ulong, so need to take
10730              * care of mismatches between target ulong and host ulong sizes.
10731              */
10732             if (arg2 & (sizeof(abi_ulong) - 1)) {
10733                 return -TARGET_EINVAL;
10734             }
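                  /* Round the request up to whole host 'unsigned long's,
                   * e.g. a 4-byte abi_ulong mask from a 32-bit guest becomes
                   * 8 bytes on a 64-bit host. */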
10735             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10736 
10737             mask = alloca(mask_size);
10738             memset(mask, 0, mask_size);
10739             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10740 
10741             if (!is_error(ret)) {
10742                 if (ret > arg2) {
10743                     /* More data returned than the caller's buffer will fit.
10744                      * This only happens if sizeof(abi_long) < sizeof(long)
10745                      * and the caller passed us a buffer holding an odd number
10746                      * of abi_longs. If the host kernel is actually using the
10747                      * extra 4 bytes then fail EINVAL; otherwise we can just
10748                      * ignore them and only copy the interesting part.
10749                      */
10750                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10751                     if (numcpus > arg2 * 8) {
10752                         return -TARGET_EINVAL;
10753                     }
10754                     ret = arg2;
10755                 }
10756 
10757                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10758                     return -TARGET_EFAULT;
10759                 }
10760             }
10761         }
10762         return ret;
10763     case TARGET_NR_sched_setaffinity:
10764         {
10765             unsigned int mask_size;
10766             unsigned long *mask;
10767 
10768             /*
10769              * sched_setaffinity needs multiples of ulong, so need to take
10770              * care of mismatches between target ulong and host ulong sizes.
10771              */
10772             if (arg2 & (sizeof(abi_ulong) - 1)) {
10773                 return -TARGET_EINVAL;
10774             }
10775             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10776             mask = alloca(mask_size);
10777 
10778             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10779             if (ret) {
10780                 return ret;
10781             }
10782 
10783             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10784         }
10785     case TARGET_NR_getcpu:
10786         {
10787             unsigned cpu, node;
10788             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10789                                        arg2 ? &node : NULL,
10790                                        NULL));
10791             if (is_error(ret)) {
10792                 return ret;
10793             }
10794             if (arg1 && put_user_u32(cpu, arg1)) {
10795                 return -TARGET_EFAULT;
10796             }
10797             if (arg2 && put_user_u32(node, arg2)) {
10798                 return -TARGET_EFAULT;
10799             }
10800         }
10801         return ret;
10802     case TARGET_NR_sched_setparam:
10803         {
10804             struct target_sched_param *target_schp;
10805             struct sched_param schp;
10806 
10807             if (arg2 == 0) {
10808                 return -TARGET_EINVAL;
10809             }
10810             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10811                 return -TARGET_EFAULT;
10812             }
10813             schp.sched_priority = tswap32(target_schp->sched_priority);
10814             unlock_user_struct(target_schp, arg2, 0);
10815             return get_errno(sys_sched_setparam(arg1, &schp));
10816         }
10817     case TARGET_NR_sched_getparam:
10818         {
10819             struct target_sched_param *target_schp;
10820             struct sched_param schp;
10821 
10822             if (arg2 == 0) {
10823                 return -TARGET_EINVAL;
10824             }
10825             ret = get_errno(sys_sched_getparam(arg1, &schp));
10826             if (!is_error(ret)) {
10827                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10828                     return -TARGET_EFAULT;
10829                 }
10830                 target_schp->sched_priority = tswap32(schp.sched_priority);
10831                 unlock_user_struct(target_schp, arg2, 1);
10832             }
10833         }
10834         return ret;
10835     case TARGET_NR_sched_setscheduler:
10836         {
10837             struct target_sched_param *target_schp;
10838             struct sched_param schp;
10839             if (arg3 == 0) {
10840                 return -TARGET_EINVAL;
10841             }
10842             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10843                 return -TARGET_EFAULT;
10844             }
10845             schp.sched_priority = tswap32(target_schp->sched_priority);
10846             unlock_user_struct(target_schp, arg3, 0);
10847             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10848         }
10849     case TARGET_NR_sched_getscheduler:
10850         return get_errno(sys_sched_getscheduler(arg1));
10851     case TARGET_NR_sched_getattr:
10852         {
10853             struct target_sched_attr *target_scha;
10854             struct sched_attr scha;
10855             if (arg2 == 0) {
10856                 return -TARGET_EINVAL;
10857             }
10858             if (arg3 > sizeof(scha)) {
10859                 arg3 = sizeof(scha);
10860             }
10861             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10862             if (!is_error(ret)) {
10863                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10864                 if (!target_scha) {
10865                     return -TARGET_EFAULT;
10866                 }
10867                 target_scha->size = tswap32(scha.size);
10868                 target_scha->sched_policy = tswap32(scha.sched_policy);
10869                 target_scha->sched_flags = tswap64(scha.sched_flags);
10870                 target_scha->sched_nice = tswap32(scha.sched_nice);
10871                 target_scha->sched_priority = tswap32(scha.sched_priority);
10872                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10873                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10874                 target_scha->sched_period = tswap64(scha.sched_period);
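                      /* The utilization clamp fields are only valid if the
                       * kernel reported a struct large enough to hold them. */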
10875                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10876                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10877                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10878                 }
10879                 unlock_user(target_scha, arg2, arg3);
10880             }
10881             return ret;
10882         }
10883     case TARGET_NR_sched_setattr:
10884         {
10885             struct target_sched_attr *target_scha;
10886             struct sched_attr scha;
10887             uint32_t size;
10888             int zeroed;
10889             if (arg2 == 0) {
10890                 return -TARGET_EINVAL;
10891             }
10892             if (get_user_u32(size, arg2)) {
10893                 return -TARGET_EFAULT;
10894             }
10895             if (!size) {
10896                 size = offsetof(struct target_sched_attr, sched_util_min);
10897             }
10898             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10899                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10900                     return -TARGET_EFAULT;
10901                 }
10902                 return -TARGET_E2BIG;
10903             }
10904 
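                  /* Any bytes the guest supplies beyond the layout we know
                   * about must be zero; otherwise report E2BIG along with
                   * the size we do support, matching the kernel's handling
                   * of oversized sched_attr structs. */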
10905             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10906             if (zeroed < 0) {
10907                 return zeroed;
10908             } else if (zeroed == 0) {
10909                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10910                     return -TARGET_EFAULT;
10911                 }
10912                 return -TARGET_E2BIG;
10913             }
10914             if (size > sizeof(struct target_sched_attr)) {
10915                 size = sizeof(struct target_sched_attr);
10916             }
10917 
10918             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10919             if (!target_scha) {
10920                 return -TARGET_EFAULT;
10921             }
10922             scha.size = size;
10923             scha.sched_policy = tswap32(target_scha->sched_policy);
10924             scha.sched_flags = tswap64(target_scha->sched_flags);
10925             scha.sched_nice = tswap32(target_scha->sched_nice);
10926             scha.sched_priority = tswap32(target_scha->sched_priority);
10927             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10928             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10929             scha.sched_period = tswap64(target_scha->sched_period);
10930             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10931                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10932                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10933             }
10934             unlock_user(target_scha, arg2, 0);
10935             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10936         }
10937     case TARGET_NR_sched_yield:
10938         return get_errno(sched_yield());
10939     case TARGET_NR_sched_get_priority_max:
10940         return get_errno(sched_get_priority_max(arg1));
10941     case TARGET_NR_sched_get_priority_min:
10942         return get_errno(sched_get_priority_min(arg1));
10943 #ifdef TARGET_NR_sched_rr_get_interval
10944     case TARGET_NR_sched_rr_get_interval:
10945         {
10946             struct timespec ts;
10947             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10948             if (!is_error(ret)) {
10949                 ret = host_to_target_timespec(arg2, &ts);
10950             }
10951         }
10952         return ret;
10953 #endif
10954 #ifdef TARGET_NR_sched_rr_get_interval_time64
10955     case TARGET_NR_sched_rr_get_interval_time64:
10956         {
10957             struct timespec ts;
10958             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10959             if (!is_error(ret)) {
10960                 ret = host_to_target_timespec64(arg2, &ts);
10961             }
10962         }
10963         return ret;
10964 #endif
10965 #if defined(TARGET_NR_nanosleep)
10966     case TARGET_NR_nanosleep:
10967         {
10968             struct timespec req, rem;
10969             target_to_host_timespec(&req, arg1);
10970             ret = get_errno(safe_nanosleep(&req, &rem));
10971             if (is_error(ret) && arg2) {
10972                 host_to_target_timespec(arg2, &rem);
10973             }
10974         }
10975         return ret;
10976 #endif
10977     case TARGET_NR_prctl:
10978         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10980 #ifdef TARGET_NR_arch_prctl
10981     case TARGET_NR_arch_prctl:
10982         return do_arch_prctl(cpu_env, arg1, arg2);
10983 #endif
10984 #ifdef TARGET_NR_pread64
10985     case TARGET_NR_pread64:
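              /* Some 32-bit ABIs pass 64-bit values in aligned register
               * pairs, which shifts the offset halves up by one slot. */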
10986         if (regpairs_aligned(cpu_env, num)) {
10987             arg4 = arg5;
10988             arg5 = arg6;
10989         }
10990         if (arg2 == 0 && arg3 == 0) {
10991             /* Special-case NULL buffer and zero length, which should succeed */
10992             p = 0;
10993         } else {
10994             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10995             if (!p) {
10996                 return -TARGET_EFAULT;
10997             }
10998         }
10999         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
11000         unlock_user(p, arg2, ret);
11001         return ret;
11002     case TARGET_NR_pwrite64:
11003         if (regpairs_aligned(cpu_env, num)) {
11004             arg4 = arg5;
11005             arg5 = arg6;
11006         }
11007         if (arg2 == 0 && arg3 == 0) {
11008             /* Special-case NULL buffer and zero length, which should succeed */
11009             p = 0;
11010         } else {
11011             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11012             if (!p) {
11013                 return -TARGET_EFAULT;
11014             }
11015         }
11016         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11017         unlock_user(p, arg2, 0);
11018         return ret;
11019 #endif
11020     case TARGET_NR_getcwd:
11021         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11022             return -TARGET_EFAULT;
11023         ret = get_errno(sys_getcwd1(p, arg2));
11024         unlock_user(p, arg1, ret);
11025         return ret;
11026     case TARGET_NR_capget:
11027     case TARGET_NR_capset:
11028     {
11029         struct target_user_cap_header *target_header;
11030         struct target_user_cap_data *target_data = NULL;
11031         struct __user_cap_header_struct header;
11032         struct __user_cap_data_struct data[2];
11033         struct __user_cap_data_struct *dataptr = NULL;
11034         int i, target_datalen;
11035         int data_items = 1;
11036 
11037         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11038             return -TARGET_EFAULT;
11039         }
11040         header.version = tswap32(target_header->version);
11041         header.pid = tswap32(target_header->pid);
11042 
11043         if (header.version != _LINUX_CAPABILITY_VERSION) {
11044             /* Version 2 and up takes pointer to two user_data structs */
11045             data_items = 2;
11046         }
11047 
11048         target_datalen = sizeof(*target_data) * data_items;
11049 
11050         if (arg2) {
11051             if (num == TARGET_NR_capget) {
11052                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11053             } else {
11054                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11055             }
11056             if (!target_data) {
11057                 unlock_user_struct(target_header, arg1, 0);
11058                 return -TARGET_EFAULT;
11059             }
11060 
11061             if (num == TARGET_NR_capset) {
11062                 for (i = 0; i < data_items; i++) {
11063                     data[i].effective = tswap32(target_data[i].effective);
11064                     data[i].permitted = tswap32(target_data[i].permitted);
11065                     data[i].inheritable = tswap32(target_data[i].inheritable);
11066                 }
11067             }
11068 
11069             dataptr = data;
11070         }
11071 
11072         if (num == TARGET_NR_capget) {
11073             ret = get_errno(capget(&header, dataptr));
11074         } else {
11075             ret = get_errno(capset(&header, dataptr));
11076         }
11077 
11078         /* The kernel always updates version for both capget and capset */
11079         target_header->version = tswap32(header.version);
11080         unlock_user_struct(target_header, arg1, 1);
11081 
11082         if (arg2) {
11083             if (num == TARGET_NR_capget) {
11084                 for (i = 0; i < data_items; i++) {
11085                     target_data[i].effective = tswap32(data[i].effective);
11086                     target_data[i].permitted = tswap32(data[i].permitted);
11087                     target_data[i].inheritable = tswap32(data[i].inheritable);
11088                 }
11089                 unlock_user(target_data, arg2, target_datalen);
11090             } else {
11091                 unlock_user(target_data, arg2, 0);
11092             }
11093         }
11094         return ret;
11095     }
11096     case TARGET_NR_sigaltstack:
11097         return do_sigaltstack(arg1, arg2, cpu_env);
11098 
11099 #ifdef CONFIG_SENDFILE
11100 #ifdef TARGET_NR_sendfile
11101     case TARGET_NR_sendfile:
11102     {
11103         off_t *offp = NULL;
11104         off_t off;
11105         if (arg3) {
11106             ret = get_user_sal(off, arg3);
11107             if (is_error(ret)) {
11108                 return ret;
11109             }
11110             offp = &off;
11111         }
11112         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11113         if (!is_error(ret) && arg3) {
11114             abi_long ret2 = put_user_sal(off, arg3);
11115             if (is_error(ret2)) {
11116                 ret = ret2;
11117             }
11118         }
11119         return ret;
11120     }
11121 #endif
11122 #ifdef TARGET_NR_sendfile64
11123     case TARGET_NR_sendfile64:
11124     {
11125         off_t *offp = NULL;
11126         off_t off;
11127         if (arg3) {
11128             ret = get_user_s64(off, arg3);
11129             if (is_error(ret)) {
11130                 return ret;
11131             }
11132             offp = &off;
11133         }
11134         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11135         if (!is_error(ret) && arg3) {
11136             abi_long ret2 = put_user_s64(off, arg3);
11137             if (is_error(ret2)) {
11138                 ret = ret2;
11139             }
11140         }
11141         return ret;
11142     }
11143 #endif
11144 #endif
11145 #ifdef TARGET_NR_vfork
11146     case TARGET_NR_vfork:
11147         return get_errno(do_fork(cpu_env,
11148                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11149                          0, 0, 0, 0));
11150 #endif
11151 #ifdef TARGET_NR_ugetrlimit
11152     case TARGET_NR_ugetrlimit:
11153     {
11154         struct rlimit rlim;
11155         int resource = target_to_host_resource(arg1);
11156         ret = get_errno(getrlimit(resource, &rlim));
11157         if (!is_error(ret)) {
11158             struct target_rlimit *target_rlim;
11159             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11160                 return -TARGET_EFAULT;
11161             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11162             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11163             unlock_user_struct(target_rlim, arg2, 1);
11164         }
11165         return ret;
11166     }
11167 #endif
11168 #ifdef TARGET_NR_truncate64
11169     case TARGET_NR_truncate64:
11170         if (!(p = lock_user_string(arg1)))
11171             return -TARGET_EFAULT;
11172         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11173         unlock_user(p, arg1, 0);
11174         return ret;
11175 #endif
11176 #ifdef TARGET_NR_ftruncate64
11177     case TARGET_NR_ftruncate64:
11178         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11179 #endif
11180 #ifdef TARGET_NR_stat64
11181     case TARGET_NR_stat64:
11182         if (!(p = lock_user_string(arg1))) {
11183             return -TARGET_EFAULT;
11184         }
11185         ret = get_errno(stat(path(p), &st));
11186         unlock_user(p, arg1, 0);
11187         if (!is_error(ret))
11188             ret = host_to_target_stat64(cpu_env, arg2, &st);
11189         return ret;
11190 #endif
11191 #ifdef TARGET_NR_lstat64
11192     case TARGET_NR_lstat64:
11193         if (!(p = lock_user_string(arg1))) {
11194             return -TARGET_EFAULT;
11195         }
11196         ret = get_errno(lstat(path(p), &st));
11197         unlock_user(p, arg1, 0);
11198         if (!is_error(ret))
11199             ret = host_to_target_stat64(cpu_env, arg2, &st);
11200         return ret;
11201 #endif
11202 #ifdef TARGET_NR_fstat64
11203     case TARGET_NR_fstat64:
11204         ret = get_errno(fstat(arg1, &st));
11205         if (!is_error(ret))
11206             ret = host_to_target_stat64(cpu_env, arg2, &st);
11207         return ret;
11208 #endif
11209 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11210 #ifdef TARGET_NR_fstatat64
11211     case TARGET_NR_fstatat64:
11212 #endif
11213 #ifdef TARGET_NR_newfstatat
11214     case TARGET_NR_newfstatat:
11215 #endif
11216         if (!(p = lock_user_string(arg2))) {
11217             return -TARGET_EFAULT;
11218         }
11219         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11220         unlock_user(p, arg2, 0);
11221         if (!is_error(ret))
11222             ret = host_to_target_stat64(cpu_env, arg3, &st);
11223         return ret;
11224 #endif
11225 #if defined(TARGET_NR_statx)
11226     case TARGET_NR_statx:
11227         {
11228             struct target_statx *target_stx;
11229             int dirfd = arg1;
11230             int flags = arg3;
11231 
11232             p = lock_user_string(arg2);
11233             if (p == NULL) {
11234                 return -TARGET_EFAULT;
11235             }
11236 #if defined(__NR_statx)
11237             {
11238                 /*
11239                  * It is assumed that struct statx is architecture independent.
11240                  */
11241                 struct target_statx host_stx;
11242                 int mask = arg4;
11243 
11244                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11245                 if (!is_error(ret)) {
11246                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11247                         unlock_user(p, arg2, 0);
11248                         return -TARGET_EFAULT;
11249                     }
11250                 }
11251 
11252                 if (ret != -TARGET_ENOSYS) {
11253                     unlock_user(p, arg2, 0);
11254                     return ret;
11255                 }
11256             }
11257 #endif
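                  /* No usable host statx(): fall back to fstatat() and fill
                   * in the statx fields an ordinary stat can provide. */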
11258             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11259             unlock_user(p, arg2, 0);
11260 
11261             if (!is_error(ret)) {
11262                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11263                     return -TARGET_EFAULT;
11264                 }
11265                 memset(target_stx, 0, sizeof(*target_stx));
11266                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11267                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11268                 __put_user(st.st_ino, &target_stx->stx_ino);
11269                 __put_user(st.st_mode, &target_stx->stx_mode);
11270                 __put_user(st.st_uid, &target_stx->stx_uid);
11271                 __put_user(st.st_gid, &target_stx->stx_gid);
11272                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11273                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11274                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11275                 __put_user(st.st_size, &target_stx->stx_size);
11276                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11277                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11278                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11279                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11280                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11281                 unlock_user_struct(target_stx, arg5, 1);
11282             }
11283         }
11284         return ret;
11285 #endif
11286 #ifdef TARGET_NR_lchown
11287     case TARGET_NR_lchown:
11288         if (!(p = lock_user_string(arg1)))
11289             return -TARGET_EFAULT;
11290         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11291         unlock_user(p, arg1, 0);
11292         return ret;
11293 #endif
11294 #ifdef TARGET_NR_getuid
11295     case TARGET_NR_getuid:
11296         return get_errno(high2lowuid(getuid()));
11297 #endif
11298 #ifdef TARGET_NR_getgid
11299     case TARGET_NR_getgid:
11300         return get_errno(high2lowgid(getgid()));
11301 #endif
11302 #ifdef TARGET_NR_geteuid
11303     case TARGET_NR_geteuid:
11304         return get_errno(high2lowuid(geteuid()));
11305 #endif
11306 #ifdef TARGET_NR_getegid
11307     case TARGET_NR_getegid:
11308         return get_errno(high2lowgid(getegid()));
11309 #endif
11310     case TARGET_NR_setreuid:
11311         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11312     case TARGET_NR_setregid:
11313         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11314     case TARGET_NR_getgroups:
11315         {
11316             int gidsetsize = arg1;
11317             target_id *target_grouplist;
11318             gid_t *grouplist;
11319             int i;
11320 
11321             grouplist = alloca(gidsetsize * sizeof(gid_t));
11322             ret = get_errno(getgroups(gidsetsize, grouplist));
11323             if (gidsetsize == 0)
11324                 return ret;
11325             if (!is_error(ret)) {
11326                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11327                 if (!target_grouplist)
11328                     return -TARGET_EFAULT;
11329                 for (i = 0; i < ret; i++)
11330                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11331                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11332             }
11333         }
11334         return ret;
11335     case TARGET_NR_setgroups:
11336         {
11337             int gidsetsize = arg1;
11338             target_id *target_grouplist;
11339             gid_t *grouplist = NULL;
11340             int i;
11341             if (gidsetsize) {
11342                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11343                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11344                 if (!target_grouplist) {
11345                     return -TARGET_EFAULT;
11346                 }
11347                 for (i = 0; i < gidsetsize; i++) {
11348                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11349                 }
11350                 unlock_user(target_grouplist, arg2, 0);
11351             }
11352             return get_errno(setgroups(gidsetsize, grouplist));
11353         }
11354     case TARGET_NR_fchown:
11355         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11356 #if defined(TARGET_NR_fchownat)
11357     case TARGET_NR_fchownat:
11358         if (!(p = lock_user_string(arg2)))
11359             return -TARGET_EFAULT;
11360         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11361                                  low2highgid(arg4), arg5));
11362         unlock_user(p, arg2, 0);
11363         return ret;
11364 #endif
11365 #ifdef TARGET_NR_setresuid
11366     case TARGET_NR_setresuid:
11367         return get_errno(sys_setresuid(low2highuid(arg1),
11368                                        low2highuid(arg2),
11369                                        low2highuid(arg3)));
11370 #endif
11371 #ifdef TARGET_NR_getresuid
11372     case TARGET_NR_getresuid:
11373         {
11374             uid_t ruid, euid, suid;
11375             ret = get_errno(getresuid(&ruid, &euid, &suid));
11376             if (!is_error(ret)) {
11377                 if (put_user_id(high2lowuid(ruid), arg1)
11378                     || put_user_id(high2lowuid(euid), arg2)
11379                     || put_user_id(high2lowuid(suid), arg3))
11380                     return -TARGET_EFAULT;
11381             }
11382         }
11383         return ret;
11384 #endif
11385 #ifdef TARGET_NR_getresgid
11386     case TARGET_NR_setresgid:
11387         return get_errno(sys_setresgid(low2highgid(arg1),
11388                                        low2highgid(arg2),
11389                                        low2highgid(arg3)));
11390 #endif
11391 #ifdef TARGET_NR_getresgid
11392     case TARGET_NR_getresgid:
11393         {
11394             gid_t rgid, egid, sgid;
11395             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11396             if (!is_error(ret)) {
11397                 if (put_user_id(high2lowgid(rgid), arg1)
11398                     || put_user_id(high2lowgid(egid), arg2)
11399                     || put_user_id(high2lowgid(sgid), arg3))
11400                     return -TARGET_EFAULT;
11401             }
11402         }
11403         return ret;
11404 #endif
11405 #ifdef TARGET_NR_chown
11406     case TARGET_NR_chown:
11407         if (!(p = lock_user_string(arg1)))
11408             return -TARGET_EFAULT;
11409         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11410         unlock_user(p, arg1, 0);
11411         return ret;
11412 #endif
11413     case TARGET_NR_setuid:
11414         return get_errno(sys_setuid(low2highuid(arg1)));
11415     case TARGET_NR_setgid:
11416         return get_errno(sys_setgid(low2highgid(arg1)));
11417     case TARGET_NR_setfsuid:
11418         return get_errno(setfsuid(arg1));
11419     case TARGET_NR_setfsgid:
11420         return get_errno(setfsgid(arg1));
11421 
11422 #ifdef TARGET_NR_lchown32
11423     case TARGET_NR_lchown32:
11424         if (!(p = lock_user_string(arg1)))
11425             return -TARGET_EFAULT;
11426         ret = get_errno(lchown(p, arg2, arg3));
11427         unlock_user(p, arg1, 0);
11428         return ret;
11429 #endif
11430 #ifdef TARGET_NR_getuid32
11431     case TARGET_NR_getuid32:
11432         return get_errno(getuid());
11433 #endif
11434 
11435 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11436     /* Alpha specific */
11437     case TARGET_NR_getxuid:
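              /* OSF getxuid returns the real uid as the syscall result and
               * the effective uid in register a4; getxgid below does the
               * same for the gids. */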
11438         {
11439             uid_t euid;
11440             euid = geteuid();
11441             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11442         }
11443         return get_errno(getuid());
11444 #endif
11445 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11446     /* Alpha specific */
11447     case TARGET_NR_getxgid:
11448         {
11449             gid_t egid;
11450             egid = getegid();
11451             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11452         }
11453         return get_errno(getgid());
11454 #endif
11455 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11456     /* Alpha specific */
11457     case TARGET_NR_osf_getsysinfo:
11458         ret = -TARGET_EOPNOTSUPP;
11459         switch (arg1) {
11460           case TARGET_GSI_IEEE_FP_CONTROL:
11461             {
11462                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11463                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11464 
11465                 swcr &= ~SWCR_STATUS_MASK;
11466                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11467 
11468                 if (put_user_u64(swcr, arg2))
11469                     return -TARGET_EFAULT;
11470                 ret = 0;
11471             }
11472             break;
11473 
11474           /* case GSI_IEEE_STATE_AT_SIGNAL:
11475              -- Not implemented in linux kernel.
11476              case GSI_UACPROC:
11477              -- Retrieves current unaligned access state; not much used.
11478              case GSI_PROC_TYPE:
11479              -- Retrieves implver information; surely not used.
11480              case GSI_GET_HWRPB:
11481              -- Grabs a copy of the HWRPB; surely not used.
11482           */
11483         }
11484         return ret;
11485 #endif
11486 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11487     /* Alpha specific */
11488     case TARGET_NR_osf_setsysinfo:
11489         ret = -TARGET_EOPNOTSUPP;
11490         switch (arg1) {
11491           case TARGET_SSI_IEEE_FP_CONTROL:
11492             {
11493                 uint64_t swcr, fpcr;
11494 
11495                 if (get_user_u64(swcr, arg2)) {
11496                     return -TARGET_EFAULT;
11497                 }
11498 
11499                 /*
11500                  * The kernel calls swcr_update_status to update the
11501                  * status bits from the fpcr at every point that it
11502                  * could be queried.  Therefore, we store the status
11503                  * bits only in FPCR.
11504                  */
11505                 ((CPUAlphaState *)cpu_env)->swcr
11506                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11507 
11508                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11509                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11510                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11511                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11512                 ret = 0;
11513             }
11514             break;
11515 
11516           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11517             {
11518                 uint64_t exc, fpcr, fex;
11519 
11520                 if (get_user_u64(exc, arg2)) {
11521                     return -TARGET_EFAULT;
11522                 }
11523                 exc &= SWCR_STATUS_MASK;
11524                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11525 
11526                 /* Old exceptions are not signaled.  */
11527                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11528                 fex = exc & ~fex;
11529                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11530                 fex &= ((CPUArchState *)cpu_env)->swcr;
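                      /* fex now holds the newly raised exceptions whose
                       * traps the guest has enabled; only those raise a
                       * SIGFPE below. */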
11531 
11532                 /* Update the hardware fpcr.  */
11533                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11534                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11535 
11536                 if (fex) {
11537                     int si_code = TARGET_FPE_FLTUNK;
11538                     target_siginfo_t info;
11539 
11540                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11541                         si_code = TARGET_FPE_FLTUND;
11542                     }
11543                     if (fex & SWCR_TRAP_ENABLE_INE) {
11544                         si_code = TARGET_FPE_FLTRES;
11545                     }
11546                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11547                         si_code = TARGET_FPE_FLTUND;
11548                     }
11549                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11550                         si_code = TARGET_FPE_FLTOVF;
11551                     }
11552                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11553                         si_code = TARGET_FPE_FLTDIV;
11554                     }
11555                     if (fex & SWCR_TRAP_ENABLE_INV) {
11556                         si_code = TARGET_FPE_FLTINV;
11557                     }
11558 
11559                     info.si_signo = SIGFPE;
11560                     info.si_errno = 0;
11561                     info.si_code = si_code;
11562                     info._sifields._sigfault._addr
11563                         = ((CPUArchState *)cpu_env)->pc;
11564                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11565                                  QEMU_SI_FAULT, &info);
11566                 }
11567                 ret = 0;
11568             }
11569             break;
11570 
11571           /* case SSI_NVPAIRS:
11572              -- Used with SSIN_UACPROC to enable unaligned accesses.
11573              case SSI_IEEE_STATE_AT_SIGNAL:
11574              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11575              -- Not implemented in linux kernel
11576           */
11577         }
11578         return ret;
11579 #endif
11580 #ifdef TARGET_NR_osf_sigprocmask
11581     /* Alpha specific.  */
11582     case TARGET_NR_osf_sigprocmask:
11583         {
11584             abi_ulong mask;
11585             int how;
11586             sigset_t set, oldset;
11587 
11588             switch (arg1) {
11589             case TARGET_SIG_BLOCK:
11590                 how = SIG_BLOCK;
11591                 break;
11592             case TARGET_SIG_UNBLOCK:
11593                 how = SIG_UNBLOCK;
11594                 break;
11595             case TARGET_SIG_SETMASK:
11596                 how = SIG_SETMASK;
11597                 break;
11598             default:
11599                 return -TARGET_EINVAL;
11600             }
11601             mask = arg2;
11602             target_to_host_old_sigset(&set, &mask);
11603             ret = do_sigprocmask(how, &set, &oldset);
11604             if (!ret) {
11605                 host_to_target_old_sigset(&mask, &oldset);
11606                 ret = mask;
11607             }
11608         }
11609         return ret;
11610 #endif
11611 
11612 #ifdef TARGET_NR_getgid32
11613     case TARGET_NR_getgid32:
11614         return get_errno(getgid());
11615 #endif
11616 #ifdef TARGET_NR_geteuid32
11617     case TARGET_NR_geteuid32:
11618         return get_errno(geteuid());
11619 #endif
11620 #ifdef TARGET_NR_getegid32
11621     case TARGET_NR_getegid32:
11622         return get_errno(getegid());
11623 #endif
11624 #ifdef TARGET_NR_setreuid32
11625     case TARGET_NR_setreuid32:
11626         return get_errno(setreuid(arg1, arg2));
11627 #endif
11628 #ifdef TARGET_NR_setregid32
11629     case TARGET_NR_setregid32:
11630         return get_errno(setregid(arg1, arg2));
11631 #endif
11632 #ifdef TARGET_NR_getgroups32
11633     case TARGET_NR_getgroups32:
11634         {
11635             int gidsetsize = arg1;
11636             uint32_t *target_grouplist;
11637             gid_t *grouplist;
11638             int i;
11639 
11640             grouplist = alloca(gidsetsize * sizeof(gid_t));
11641             ret = get_errno(getgroups(gidsetsize, grouplist));
11642             if (gidsetsize == 0)
11643                 return ret;
11644             if (!is_error(ret)) {
11645                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11646                 if (!target_grouplist) {
11647                     return -TARGET_EFAULT;
11648                 }
11649                 for (i = 0; i < ret; i++)
11650                     target_grouplist[i] = tswap32(grouplist[i]);
11651                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11652             }
11653         }
11654         return ret;
11655 #endif
11656 #ifdef TARGET_NR_setgroups32
11657     case TARGET_NR_setgroups32:
11658         {
11659             int gidsetsize = arg1;
11660             uint32_t *target_grouplist;
11661             gid_t *grouplist;
11662             int i;
11663 
11664             grouplist = alloca(gidsetsize * sizeof(gid_t));
11665             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11666             if (!target_grouplist) {
11667                 return -TARGET_EFAULT;
11668             }
11669             for (i = 0; i < gidsetsize; i++)
11670                 grouplist[i] = tswap32(target_grouplist[i]);
11671             unlock_user(target_grouplist, arg2, 0);
11672             return get_errno(setgroups(gidsetsize, grouplist));
11673         }
11674 #endif
11675 #ifdef TARGET_NR_fchown32
11676     case TARGET_NR_fchown32:
11677         return get_errno(fchown(arg1, arg2, arg3));
11678 #endif
11679 #ifdef TARGET_NR_setresuid32
11680     case TARGET_NR_setresuid32:
11681         return get_errno(sys_setresuid(arg1, arg2, arg3));
11682 #endif
11683 #ifdef TARGET_NR_getresuid32
11684     case TARGET_NR_getresuid32:
11685         {
11686             uid_t ruid, euid, suid;
11687             ret = get_errno(getresuid(&ruid, &euid, &suid));
11688             if (!is_error(ret)) {
11689                 if (put_user_u32(ruid, arg1)
11690                     || put_user_u32(euid, arg2)
11691                     || put_user_u32(suid, arg3))
11692                     return -TARGET_EFAULT;
11693             }
11694         }
11695         return ret;
11696 #endif
11697 #ifdef TARGET_NR_setresgid32
11698     case TARGET_NR_setresgid32:
11699         return get_errno(sys_setresgid(arg1, arg2, arg3));
11700 #endif
11701 #ifdef TARGET_NR_getresgid32
11702     case TARGET_NR_getresgid32:
11703         {
11704             gid_t rgid, egid, sgid;
11705             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11706             if (!is_error(ret)) {
11707                 if (put_user_u32(rgid, arg1)
11708                     || put_user_u32(egid, arg2)
11709                     || put_user_u32(sgid, arg3))
11710                     return -TARGET_EFAULT;
11711             }
11712         }
11713         return ret;
11714 #endif
11715 #ifdef TARGET_NR_chown32
11716     case TARGET_NR_chown32:
11717         if (!(p = lock_user_string(arg1)))
11718             return -TARGET_EFAULT;
11719         ret = get_errno(chown(p, arg2, arg3));
11720         unlock_user(p, arg1, 0);
11721         return ret;
11722 #endif
11723 #ifdef TARGET_NR_setuid32
11724     case TARGET_NR_setuid32:
11725         return get_errno(sys_setuid(arg1));
11726 #endif
11727 #ifdef TARGET_NR_setgid32
11728     case TARGET_NR_setgid32:
11729         return get_errno(sys_setgid(arg1));
11730 #endif
11731 #ifdef TARGET_NR_setfsuid32
11732     case TARGET_NR_setfsuid32:
11733         return get_errno(setfsuid(arg1));
11734 #endif
11735 #ifdef TARGET_NR_setfsgid32
11736     case TARGET_NR_setfsgid32:
11737         return get_errno(setfsgid(arg1));
11738 #endif
11739 #ifdef TARGET_NR_mincore
11740     case TARGET_NR_mincore:
11741         {
11742             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11743             if (!a) {
11744                 return -TARGET_ENOMEM;
11745             }
11746             p = lock_user_string(arg3);
11747             if (!p) {
11748                 ret = -TARGET_EFAULT;
11749             } else {
11750                 ret = get_errno(mincore(a, arg2, p));
11751                 unlock_user(p, arg3, ret);
11752             }
11753             unlock_user(a, arg1, 0);
11754         }
11755         return ret;
11756 #endif
11757 #ifdef TARGET_NR_arm_fadvise64_64
11758     case TARGET_NR_arm_fadvise64_64:
11759         /* arm_fadvise64_64 looks like fadvise64_64 but
11760          * with different argument order: fd, advice, offset, len
11761          * rather than the usual fd, offset, len, advice.
11762          * Note that offset and len are both 64-bit so appear as
11763          * pairs of 32-bit registers.
11764          */
11765         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11766                             target_offset64(arg5, arg6), arg2);
11767         return -host_to_target_errno(ret);
11768 #endif
11769 
11770 #if TARGET_ABI_BITS == 32
11771 
11772 #ifdef TARGET_NR_fadvise64_64
11773     case TARGET_NR_fadvise64_64:
11774 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11775         /* 6 args: fd, advice, offset (high, low), len (high, low) */
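              /* Rotate the arguments so the shared posix_fadvise() call
               * below sees the usual fd, offset, len, advice order; 'ret'
               * is only a scratch variable here. */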
11776         ret = arg2;
11777         arg2 = arg3;
11778         arg3 = arg4;
11779         arg4 = arg5;
11780         arg5 = arg6;
11781         arg6 = ret;
11782 #else
11783         /* 6 args: fd, offset (high, low), len (high, low), advice */
11784         if (regpairs_aligned(cpu_env, num)) {
11785             /* offset is in (3,4), len in (5,6) and advice in 7 */
11786             arg2 = arg3;
11787             arg3 = arg4;
11788             arg4 = arg5;
11789             arg5 = arg6;
11790             arg6 = arg7;
11791         }
11792 #endif
11793         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11794                             target_offset64(arg4, arg5), arg6);
11795         return -host_to_target_errno(ret);
11796 #endif
11797 
11798 #ifdef TARGET_NR_fadvise64
11799     case TARGET_NR_fadvise64:
11800         /* 5 args: fd, offset (high, low), len, advice */
11801         if (regpairs_aligned(cpu_env, num)) {
11802             /* offset is in (3,4), len in 5 and advice in 6 */
11803             arg2 = arg3;
11804             arg3 = arg4;
11805             arg4 = arg5;
11806             arg5 = arg6;
11807         }
11808         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11809         return -host_to_target_errno(ret);
11810 #endif
11811 
11812 #else /* not a 32-bit ABI */
11813 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11814 #ifdef TARGET_NR_fadvise64_64
11815     case TARGET_NR_fadvise64_64:
11816 #endif
11817 #ifdef TARGET_NR_fadvise64
11818     case TARGET_NR_fadvise64:
11819 #endif
11820 #ifdef TARGET_S390X
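              /* The 64-bit s390x ABI uses 6/7 for POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE; remap them to the host values and turn
               * the guest's 4 and 5 into deliberately invalid advice. */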
11821         switch (arg4) {
11822         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11823         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11824         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11825         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11826         default: break;
11827         }
11828 #endif
11829         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11830 #endif
11831 #endif /* end of 64-bit ABI fadvise handling */
11832 
11833 #ifdef TARGET_NR_madvise
11834     case TARGET_NR_madvise:
11835         /* A straight passthrough may not be safe because qemu sometimes
11836          * turns private file-backed mappings into anonymous mappings.
11837          * This will break MADV_DONTNEED.
11838          * This is a hint, so ignoring and returning success is ok.  */
11839         return 0;
11840 #endif
11841 #ifdef TARGET_NR_fcntl64
11842     case TARGET_NR_fcntl64:
11843     {
11844         int cmd;
11845         struct flock64 fl;
11846         from_flock64_fn *copyfrom = copy_from_user_flock64;
11847         to_flock64_fn *copyto = copy_to_user_flock64;
11848 
11849 #ifdef TARGET_ARM
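              /* Old-ABI (OABI) Arm lays out struct flock64 differently from
               * EABI, so those guests need the OABI copy helpers. */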
11850         if (!((CPUARMState *)cpu_env)->eabi) {
11851             copyfrom = copy_from_user_oabi_flock64;
11852             copyto = copy_to_user_oabi_flock64;
11853         }
11854 #endif
11855 
11856         cmd = target_to_host_fcntl_cmd(arg2);
11857         if (cmd == -TARGET_EINVAL) {
11858             return cmd;
11859         }
11860 
11861         switch (arg2) {
11862         case TARGET_F_GETLK64:
11863             ret = copyfrom(&fl, arg3);
11864             if (ret) {
11865                 break;
11866             }
11867             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11868             if (ret == 0) {
11869                 ret = copyto(arg3, &fl);
11870             }
11871             break;
11872 
11873         case TARGET_F_SETLK64:
11874         case TARGET_F_SETLKW64:
11875             ret = copyfrom(&fl, arg3);
11876             if (ret) {
11877                 break;
11878             }
11879             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11880             break;
11881         default:
11882             ret = do_fcntl(arg1, arg2, arg3);
11883             break;
11884         }
11885         return ret;
11886     }
11887 #endif
11888 #ifdef TARGET_NR_cacheflush
11889     case TARGET_NR_cacheflush:
11890         /* self-modifying code is handled automatically, so nothing needed */
11891         return 0;
11892 #endif
11893 #ifdef TARGET_NR_getpagesize
11894     case TARGET_NR_getpagesize:
11895         return TARGET_PAGE_SIZE;
11896 #endif
11897     case TARGET_NR_gettid:
11898         return get_errno(sys_gettid());
11899 #ifdef TARGET_NR_readahead
11900     case TARGET_NR_readahead:
11901 #if TARGET_ABI_BITS == 32
11902         if (regpairs_aligned(cpu_env, num)) {
11903             arg2 = arg3;
11904             arg3 = arg4;
11905             arg4 = arg5;
11906         }
11907         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11908 #else
11909         ret = get_errno(readahead(arg1, arg2, arg3));
11910 #endif
11911         return ret;
11912 #endif
11913 #ifdef CONFIG_ATTR
11914 #ifdef TARGET_NR_setxattr
11915     case TARGET_NR_listxattr:
11916     case TARGET_NR_llistxattr:
11917     {
11918         void *p, *b = 0;
11919         if (arg2) {
11920             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11921             if (!b) {
11922                 return -TARGET_EFAULT;
11923             }
11924         }
11925         p = lock_user_string(arg1);
11926         if (p) {
11927             if (num == TARGET_NR_listxattr) {
11928                 ret = get_errno(listxattr(p, b, arg3));
11929             } else {
11930                 ret = get_errno(llistxattr(p, b, arg3));
11931             }
11932         } else {
11933             ret = -TARGET_EFAULT;
11934         }
11935         unlock_user(p, arg1, 0);
11936         unlock_user(b, arg2, arg3);
11937         return ret;
11938     }
11939     case TARGET_NR_flistxattr:
11940     {
11941         void *b = 0;
11942         if (arg2) {
11943             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11944             if (!b) {
11945                 return -TARGET_EFAULT;
11946             }
11947         }
11948         ret = get_errno(flistxattr(arg1, b, arg3));
11949         unlock_user(b, arg2, arg3);
11950         return ret;
11951     }
11952     case TARGET_NR_setxattr:
11953     case TARGET_NR_lsetxattr:
11954         {
11955             void *p, *n, *v = 0;
11956             if (arg3) {
11957                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11958                 if (!v) {
11959                     return -TARGET_EFAULT;
11960                 }
11961             }
11962             p = lock_user_string(arg1);
11963             n = lock_user_string(arg2);
11964             if (p && n) {
11965                 if (num == TARGET_NR_setxattr) {
11966                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11967                 } else {
11968                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11969                 }
11970             } else {
11971                 ret = -TARGET_EFAULT;
11972             }
11973             unlock_user(p, arg1, 0);
11974             unlock_user(n, arg2, 0);
11975             unlock_user(v, arg3, 0);
11976         }
11977         return ret;
11978     case TARGET_NR_fsetxattr:
11979         {
11980             void *n, *v = 0;
11981             if (arg3) {
11982                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11983                 if (!v) {
11984                     return -TARGET_EFAULT;
11985                 }
11986             }
11987             n = lock_user_string(arg2);
11988             if (n) {
11989                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11990             } else {
11991                 ret = -TARGET_EFAULT;
11992             }
11993             unlock_user(n, arg2, 0);
11994             unlock_user(v, arg3, 0);
11995         }
11996         return ret;
11997     case TARGET_NR_getxattr:
11998     case TARGET_NR_lgetxattr:
11999         {
12000             void *p, *n, *v = 0;
12001             if (arg3) {
12002                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12003                 if (!v) {
12004                     return -TARGET_EFAULT;
12005                 }
12006             }
12007             p = lock_user_string(arg1);
12008             n = lock_user_string(arg2);
12009             if (p && n) {
12010                 if (num == TARGET_NR_getxattr) {
12011                     ret = get_errno(getxattr(p, n, v, arg4));
12012                 } else {
12013                     ret = get_errno(lgetxattr(p, n, v, arg4));
12014                 }
12015             } else {
12016                 ret = -TARGET_EFAULT;
12017             }
12018             unlock_user(p, arg1, 0);
12019             unlock_user(n, arg2, 0);
12020             unlock_user(v, arg3, arg4);
12021         }
12022         return ret;
12023     case TARGET_NR_fgetxattr:
12024         {
12025             void *n, *v = 0;
12026             if (arg3) {
12027                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12028                 if (!v) {
12029                     return -TARGET_EFAULT;
12030                 }
12031             }
12032             n = lock_user_string(arg2);
12033             if (n) {
12034                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12035             } else {
12036                 ret = -TARGET_EFAULT;
12037             }
12038             unlock_user(n, arg2, 0);
12039             unlock_user(v, arg3, arg4);
12040         }
12041         return ret;
12042     case TARGET_NR_removexattr:
12043     case TARGET_NR_lremovexattr:
12044         {
12045             void *p, *n;
12046             p = lock_user_string(arg1);
12047             n = lock_user_string(arg2);
12048             if (p && n) {
12049                 if (num == TARGET_NR_removexattr) {
12050                     ret = get_errno(removexattr(p, n));
12051                 } else {
12052                     ret = get_errno(lremovexattr(p, n));
12053                 }
12054             } else {
12055                 ret = -TARGET_EFAULT;
12056             }
12057             unlock_user(p, arg1, 0);
12058             unlock_user(n, arg2, 0);
12059         }
12060         return ret;
12061     case TARGET_NR_fremovexattr:
12062         {
12063             void *n;
12064             n = lock_user_string(arg2);
12065             if (n) {
12066                 ret = get_errno(fremovexattr(arg1, n));
12067             } else {
12068                 ret = -TARGET_EFAULT;
12069             }
12070             unlock_user(n, arg2, 0);
12071         }
12072         return ret;
12073 #endif
12074 #endif /* CONFIG_ATTR */
12075 #ifdef TARGET_NR_set_thread_area
12076     case TARGET_NR_set_thread_area:
12077 #if defined(TARGET_MIPS)
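            /* MIPS keeps the TLS pointer in the UserLocal register, which
             * the guest reads back with the rdhwr instruction. */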
12078       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12079       return 0;
12080 #elif defined(TARGET_CRIS)
12081       if (arg1 & 0xff)
12082           ret = -TARGET_EINVAL;
12083       else {
12084           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12085           ret = 0;
12086       }
12087       return ret;
12088 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12089       return do_set_thread_area(cpu_env, arg1);
12090 #elif defined(TARGET_M68K)
12091       {
12092           TaskState *ts = cpu->opaque;
12093           ts->tp_value = arg1;
12094           return 0;
12095       }
12096 #else
12097       return -TARGET_ENOSYS;
12098 #endif
12099 #endif
12100 #ifdef TARGET_NR_get_thread_area
12101     case TARGET_NR_get_thread_area:
12102 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12103         return do_get_thread_area(cpu_env, arg1);
12104 #elif defined(TARGET_M68K)
12105         {
12106             TaskState *ts = cpu->opaque;
12107             return ts->tp_value;
12108         }
12109 #else
12110         return -TARGET_ENOSYS;
12111 #endif
12112 #endif
12113 #ifdef TARGET_NR_getdomainname
12114     case TARGET_NR_getdomainname:
12115         return -TARGET_ENOSYS;
12116 #endif
12117 
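    /*
     * For the clock_* syscalls the only work is converting struct timespec
     * between guest and host layout. The *_time64 variants exist for guests
     * with 32-bit time_t and use the 64-bit layout
     * (struct target__kernel_timespec) instead of the legacy target_timespec.
     */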
12118 #ifdef TARGET_NR_clock_settime
12119     case TARGET_NR_clock_settime:
12120     {
12121         struct timespec ts;
12122 
12123         ret = target_to_host_timespec(&ts, arg2);
12124         if (!is_error(ret)) {
12125             ret = get_errno(clock_settime(arg1, &ts));
12126         }
12127         return ret;
12128     }
12129 #endif
12130 #ifdef TARGET_NR_clock_settime64
12131     case TARGET_NR_clock_settime64:
12132     {
12133         struct timespec ts;
12134 
12135         ret = target_to_host_timespec64(&ts, arg2);
12136         if (!is_error(ret)) {
12137             ret = get_errno(clock_settime(arg1, &ts));
12138         }
12139         return ret;
12140     }
12141 #endif
12142 #ifdef TARGET_NR_clock_gettime
12143     case TARGET_NR_clock_gettime:
12144     {
12145         struct timespec ts;
12146         ret = get_errno(clock_gettime(arg1, &ts));
12147         if (!is_error(ret)) {
12148             ret = host_to_target_timespec(arg2, &ts);
12149         }
12150         return ret;
12151     }
12152 #endif
12153 #ifdef TARGET_NR_clock_gettime64
12154     case TARGET_NR_clock_gettime64:
12155     {
12156         struct timespec ts;
12157         ret = get_errno(clock_gettime(arg1, &ts));
12158         if (!is_error(ret)) {
12159             ret = host_to_target_timespec64(arg2, &ts);
12160         }
12161         return ret;
12162     }
12163 #endif
12164 #ifdef TARGET_NR_clock_getres
12165     case TARGET_NR_clock_getres:
12166     {
12167         struct timespec ts;
12168         ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
12172         return ret;
12173     }
12174 #endif
12175 #ifdef TARGET_NR_clock_getres_time64
12176     case TARGET_NR_clock_getres_time64:
12177     {
12178         struct timespec ts;
12179         ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
12183         return ret;
12184     }
12185 #endif
12186 #ifdef TARGET_NR_clock_nanosleep
12187     case TARGET_NR_clock_nanosleep:
12188     {
12189         struct timespec ts;
12190         if (target_to_host_timespec(&ts, arg3)) {
12191             return -TARGET_EFAULT;
12192         }
12193         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12194                                              &ts, arg4 ? &ts : NULL));
        /*
         * If the call is interrupted by a signal handler, it fails with
         * -TARGET_EINTR. In that case, if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, the remaining unslept time is reported back in arg4.
         */
12200         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12201             host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
12203         }
12204 
12205         return ret;
12206     }
12207 #endif
12208 #ifdef TARGET_NR_clock_nanosleep_time64
12209     case TARGET_NR_clock_nanosleep_time64:
12210     {
12211         struct timespec ts;
12212 
12213         if (target_to_host_timespec64(&ts, arg3)) {
12214             return -TARGET_EFAULT;
12215         }
12216 
12217         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12218                                              &ts, arg4 ? &ts : NULL));
12219 
12220         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12221             host_to_target_timespec64(arg4, &ts)) {
12222             return -TARGET_EFAULT;
12223         }
12224         return ret;
12225     }
12226 #endif
12227 
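    /*
     * set_tid_address is given the host address of the guest's TID word
     * (g2h), so the kernel's clear-child-tid write on thread exit lands
     * directly in guest memory.
     */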
12228 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12229     case TARGET_NR_set_tid_address:
12230         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12231 #endif
12232 
12233     case TARGET_NR_tkill:
12234         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12235 
12236     case TARGET_NR_tgkill:
12237         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12238                          target_to_host_signal(arg3)));
12239 
12240 #ifdef TARGET_NR_set_robust_list
12241     case TARGET_NR_set_robust_list:
12242     case TARGET_NR_get_robust_list:
12243         /* The ABI for supporting robust futexes has userspace pass
12244          * the kernel a pointer to a linked list which is updated by
12245          * userspace after the syscall; the list is walked by the kernel
12246          * when the thread exits. Since the linked list in QEMU guest
12247          * memory isn't a valid linked list for the host and we have
12248          * no way to reliably intercept the thread-death event, we can't
12249          * support these. Silently return ENOSYS so that guest userspace
12250          * falls back to a non-robust futex implementation (which should
12251          * be OK except in the corner case of the guest crashing while
12252          * holding a mutex that is shared with another process via
12253          * shared memory).
12254          */
12255         return -TARGET_ENOSYS;
12256 #endif
12257 
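    /*
     * utimensat takes an array of two timespecs (access and modification
     * time) at arg3. A NULL arg3 means "set both to the current time",
     * which is why tsp is left NULL in that case rather than faulting.
     */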
12258 #if defined(TARGET_NR_utimensat)
12259     case TARGET_NR_utimensat:
12260         {
12261             struct timespec *tsp, ts[2];
12262             if (!arg3) {
12263                 tsp = NULL;
12264             } else {
12265                 if (target_to_host_timespec(ts, arg3)) {
12266                     return -TARGET_EFAULT;
12267                 }
12268                 if (target_to_host_timespec(ts + 1, arg3 +
12269                                             sizeof(struct target_timespec))) {
12270                     return -TARGET_EFAULT;
12271                 }
12272                 tsp = ts;
12273             }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
12280                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12281                 unlock_user(p, arg2, 0);
12282             }
12283         }
12284         return ret;
12285 #endif
12286 #ifdef TARGET_NR_utimensat_time64
12287     case TARGET_NR_utimensat_time64:
12288         {
12289             struct timespec *tsp, ts[2];
12290             if (!arg3) {
12291                 tsp = NULL;
12292             } else {
12293                 if (target_to_host_timespec64(ts, arg3)) {
12294                     return -TARGET_EFAULT;
12295                 }
12296                 if (target_to_host_timespec64(ts + 1, arg3 +
12297                                      sizeof(struct target__kernel_timespec))) {
12298                     return -TARGET_EFAULT;
12299                 }
12300                 tsp = ts;
12301             }
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
12305                 p = lock_user_string(arg2);
12306                 if (!p) {
12307                     return -TARGET_EFAULT;
12308                 }
12309                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12310                 unlock_user(p, arg2, 0);
12311             }
12312         }
12313         return ret;
12314 #endif
12315 #ifdef TARGET_NR_futex
12316     case TARGET_NR_futex:
12317         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12318 #endif
12319 #ifdef TARGET_NR_futex_time64
12320     case TARGET_NR_futex_time64:
12321         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12322 #endif
12323 #ifdef CONFIG_INOTIFY
12324 #if defined(TARGET_NR_inotify_init)
12325     case TARGET_NR_inotify_init:
12326         ret = get_errno(inotify_init());
12327         if (ret >= 0) {
12328             fd_trans_register(ret, &target_inotify_trans);
12329         }
12330         return ret;
12331 #endif
12332 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12333     case TARGET_NR_inotify_init1:
12334         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12335                                           fcntl_flags_tbl)));
12336         if (ret >= 0) {
12337             fd_trans_register(ret, &target_inotify_trans);
12338         }
12339         return ret;
12340 #endif
12341 #if defined(TARGET_NR_inotify_add_watch)
12342     case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
12346         return ret;
12347 #endif
12348 #if defined(TARGET_NR_inotify_rm_watch)
12349     case TARGET_NR_inotify_rm_watch:
12350         return get_errno(inotify_rm_watch(arg1, arg2));
12351 #endif
12352 #endif
12353 
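    /*
     * POSIX message queues are forwarded to the host's mq_* interface;
     * struct mq_attr is converted with the copy_{from,to}_user_mq_attr
     * helpers and the open flags are remapped via fcntl_flags_tbl.
     */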
12354 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12355     case TARGET_NR_mq_open:
12356         {
12357             struct mq_attr posix_mq_attr;
12358             struct mq_attr *pposix_mq_attr;
12359             int host_flags;
12360 
12361             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12362             pposix_mq_attr = NULL;
12363             if (arg4) {
12364                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12365                     return -TARGET_EFAULT;
12366                 }
12367                 pposix_mq_attr = &posix_mq_attr;
12368             }
12369             p = lock_user_string(arg1 - 1);
12370             if (!p) {
12371                 return -TARGET_EFAULT;
12372             }
12373             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
12375         }
12376         return ret;
12377 
12378     case TARGET_NR_mq_unlink:
12379         p = lock_user_string(arg1 - 1);
12380         if (!p) {
12381             return -TARGET_EFAULT;
12382         }
12383         ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
12385         return ret;
12386 
12387 #ifdef TARGET_NR_mq_timedsend
12388     case TARGET_NR_mq_timedsend:
12389         {
12390             struct timespec ts;
12391 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
12393             if (arg5 != 0) {
12394                 if (target_to_host_timespec(&ts, arg5)) {
12395                     return -TARGET_EFAULT;
12396                 }
12397                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12398                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12399                     return -TARGET_EFAULT;
12400                 }
12401             } else {
12402                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12403             }
            unlock_user(p, arg2, arg3);
12405         }
12406         return ret;
12407 #endif
12408 #ifdef TARGET_NR_mq_timedsend_time64
12409     case TARGET_NR_mq_timedsend_time64:
12410         {
12411             struct timespec ts;
12412 
12413             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12414             if (arg5 != 0) {
12415                 if (target_to_host_timespec64(&ts, arg5)) {
12416                     return -TARGET_EFAULT;
12417                 }
12418                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12419                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12420                     return -TARGET_EFAULT;
12421                 }
12422             } else {
12423                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12424             }
12425             unlock_user(p, arg2, arg3);
12426         }
12427         return ret;
12428 #endif
12429 
12430 #ifdef TARGET_NR_mq_timedreceive
12431     case TARGET_NR_mq_timedreceive:
12432         {
12433             struct timespec ts;
12434             unsigned int prio;
12435 
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
12437             if (arg5 != 0) {
12438                 if (target_to_host_timespec(&ts, arg5)) {
12439                     return -TARGET_EFAULT;
12440                 }
12441                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12442                                                      &prio, &ts));
12443                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12444                     return -TARGET_EFAULT;
12445                 }
12446             } else {
12447                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12448                                                      &prio, NULL));
12449             }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
12453         }
12454         return ret;
12455 #endif
12456 #ifdef TARGET_NR_mq_timedreceive_time64
12457     case TARGET_NR_mq_timedreceive_time64:
12458         {
12459             struct timespec ts;
12460             unsigned int prio;
12461 
12462             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12463             if (arg5 != 0) {
12464                 if (target_to_host_timespec64(&ts, arg5)) {
12465                     return -TARGET_EFAULT;
12466                 }
12467                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12468                                                      &prio, &ts));
12469                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12470                     return -TARGET_EFAULT;
12471                 }
12472             } else {
12473                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12474                                                      &prio, NULL));
12475             }
12476             unlock_user(p, arg2, arg3);
12477             if (arg4 != 0) {
12478                 put_user_u32(prio, arg4);
12479             }
12480         }
12481         return ret;
12482 #endif
12483 
12484     /* Not implemented for now... */
12485 /*     case TARGET_NR_mq_notify: */
12486 /*         break; */
12487 
12488     case TARGET_NR_mq_getsetattr:
12489         {
12490             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12491             ret = 0;
            if (arg2 != 0) {
                if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                if (copy_to_user_mq_attr(arg3, &posix_mq_attr_out) != 0) {
                    return -TARGET_EFAULT;
                }
            }
12502         }
12503         return ret;
12504 #endif
12505 
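    /*
     * tee/splice/vmsplice map almost directly onto the host calls. For
     * splice the optional loff_t offsets are read from guest memory before
     * the call and written back afterwards, since the host call updates
     * them; vmsplice converts the guest iovec with lock_iovec().
     */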
12506 #ifdef CONFIG_SPLICE
12507 #ifdef TARGET_NR_tee
12508     case TARGET_NR_tee:
12509         {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
12511         }
12512         return ret;
12513 #endif
12514 #ifdef TARGET_NR_splice
12515     case TARGET_NR_splice:
12516         {
12517             loff_t loff_in, loff_out;
12518             loff_t *ploff_in = NULL, *ploff_out = NULL;
12519             if (arg2) {
12520                 if (get_user_u64(loff_in, arg2)) {
12521                     return -TARGET_EFAULT;
12522                 }
12523                 ploff_in = &loff_in;
12524             }
12525             if (arg4) {
12526                 if (get_user_u64(loff_out, arg4)) {
12527                     return -TARGET_EFAULT;
12528                 }
12529                 ploff_out = &loff_out;
12530             }
12531             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12532             if (arg2) {
12533                 if (put_user_u64(loff_in, arg2)) {
12534                     return -TARGET_EFAULT;
12535                 }
12536             }
12537             if (arg4) {
12538                 if (put_user_u64(loff_out, arg4)) {
12539                     return -TARGET_EFAULT;
12540                 }
12541             }
12542         }
12543         return ret;
12544 #endif
12545 #ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
12547         {
12548             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12549             if (vec != NULL) {
12550                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12551                 unlock_iovec(vec, arg2, arg3, 0);
12552             } else {
12553                 ret = -host_to_target_errno(errno);
12554             }
12555         }
12556         return ret;
12557 #endif
12558 #endif /* CONFIG_SPLICE */
12559 #ifdef CONFIG_EVENTFD
12560 #if defined(TARGET_NR_eventfd)
12561     case TARGET_NR_eventfd:
12562         ret = get_errno(eventfd(arg1, 0));
12563         if (ret >= 0) {
12564             fd_trans_register(ret, &target_eventfd_trans);
12565         }
12566         return ret;
12567 #endif
12568 #if defined(TARGET_NR_eventfd2)
12569     case TARGET_NR_eventfd2:
12570     {
12571         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12572         if (arg2 & TARGET_O_NONBLOCK) {
12573             host_flags |= O_NONBLOCK;
12574         }
12575         if (arg2 & TARGET_O_CLOEXEC) {
12576             host_flags |= O_CLOEXEC;
12577         }
12578         ret = get_errno(eventfd(arg1, host_flags));
12579         if (ret >= 0) {
12580             fd_trans_register(ret, &target_eventfd_trans);
12581         }
12582         return ret;
12583     }
12584 #endif
#endif /* CONFIG_EVENTFD */
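    /*
     * fallocate: on 32-bit ABIs the 64-bit offset and length arrive split
     * across two registers each and are reassembled with target_offset64();
     * 64-bit ABIs pass them through directly.
     */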
12586 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12587     case TARGET_NR_fallocate:
12588 #if TARGET_ABI_BITS == 32
12589         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12590                                   target_offset64(arg5, arg6)));
12591 #else
12592         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12593 #endif
12594         return ret;
12595 #endif
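    /*
     * sync_file_range has the same 64-bit offset splitting on 32-bit ABIs.
     * MIPS passes the offsets one argument slot later, presumably because
     * the o32 convention requires 64-bit arguments to start in an even
     * register pair, hence the separate arg3/arg4 vs arg2/arg3 cases.
     */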
12596 #if defined(CONFIG_SYNC_FILE_RANGE)
12597 #if defined(TARGET_NR_sync_file_range)
12598     case TARGET_NR_sync_file_range:
12599 #if TARGET_ABI_BITS == 32
12600 #if defined(TARGET_MIPS)
12601         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12602                                         target_offset64(arg5, arg6), arg7));
12603 #else
12604         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12605                                         target_offset64(arg4, arg5), arg6));
12606 #endif /* !TARGET_MIPS */
12607 #else
12608         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12609 #endif
12610         return ret;
12611 #endif
12612 #if defined(TARGET_NR_sync_file_range2) || \
12613     defined(TARGET_NR_arm_sync_file_range)
12614 #if defined(TARGET_NR_sync_file_range2)
12615     case TARGET_NR_sync_file_range2:
12616 #endif
12617 #if defined(TARGET_NR_arm_sync_file_range)
12618     case TARGET_NR_arm_sync_file_range:
12619 #endif
12620         /* This is like sync_file_range but the arguments are reordered */
12621 #if TARGET_ABI_BITS == 32
12622         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12623                                         target_offset64(arg5, arg6), arg2));
12624 #else
12625         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12626 #endif
12627         return ret;
12628 #endif
12629 #endif
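    /*
     * Both signalfd variants are funnelled through do_signalfd4(); plain
     * signalfd simply passes no flags.
     */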
12630 #if defined(TARGET_NR_signalfd4)
12631     case TARGET_NR_signalfd4:
12632         return do_signalfd4(arg1, arg2, arg4);
12633 #endif
12634 #if defined(TARGET_NR_signalfd)
12635     case TARGET_NR_signalfd:
12636         return do_signalfd4(arg1, arg2, 0);
12637 #endif
12638 #if defined(CONFIG_EPOLL)
12639 #if defined(TARGET_NR_epoll_create)
12640     case TARGET_NR_epoll_create:
12641         return get_errno(epoll_create(arg1));
12642 #endif
12643 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12644     case TARGET_NR_epoll_create1:
12645         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12646 #endif
12647 #if defined(TARGET_NR_epoll_ctl)
12648     case TARGET_NR_epoll_ctl:
12649     {
12650         struct epoll_event ep;
12651         struct epoll_event *epp = 0;
12652         if (arg4) {
12653             if (arg2 != EPOLL_CTL_DEL) {
12654                 struct target_epoll_event *target_ep;
12655                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12656                     return -TARGET_EFAULT;
12657                 }
12658                 ep.events = tswap32(target_ep->events);
12659                 /*
12660                  * The epoll_data_t union is just opaque data to the kernel,
12661                  * so we transfer all 64 bits across and need not worry what
12662                  * actual data type it is.
12663                  */
12664                 ep.data.u64 = tswap64(target_ep->data.u64);
12665                 unlock_user_struct(target_ep, arg4, 0);
12666             }
            /*
             * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
             * non-NULL pointer, even though this argument is ignored.
             */
12672             epp = &ep;
12673         }
12674         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12675     }
12676 #endif
12677 
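    /*
     * epoll_wait/epoll_pwait: maxevents is bounded by TARGET_EP_MAX_EVENTS,
     * events are collected into a host-side bounce buffer and then
     * byte-swapped into the guest's target_epoll_event array. epoll_wait is
     * simply epoll_pwait with a NULL signal mask.
     */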
12678 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12679 #if defined(TARGET_NR_epoll_wait)
12680     case TARGET_NR_epoll_wait:
12681 #endif
12682 #if defined(TARGET_NR_epoll_pwait)
12683     case TARGET_NR_epoll_pwait:
12684 #endif
12685     {
12686         struct target_epoll_event *target_ep;
12687         struct epoll_event *ep;
12688         int epfd = arg1;
12689         int maxevents = arg3;
12690         int timeout = arg4;
12691 
12692         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12693             return -TARGET_EINVAL;
12694         }
12695 
12696         target_ep = lock_user(VERIFY_WRITE, arg2,
12697                               maxevents * sizeof(struct target_epoll_event), 1);
12698         if (!target_ep) {
12699             return -TARGET_EFAULT;
12700         }
12701 
12702         ep = g_try_new(struct epoll_event, maxevents);
12703         if (!ep) {
12704             unlock_user(target_ep, arg2, 0);
12705             return -TARGET_ENOMEM;
12706         }
12707 
12708         switch (num) {
12709 #if defined(TARGET_NR_epoll_pwait)
12710         case TARGET_NR_epoll_pwait:
12711         {
12712             target_sigset_t *target_set;
12713             sigset_t _set, *set = &_set;
12714 
12715             if (arg5) {
12716                 if (arg6 != sizeof(target_sigset_t)) {
12717                     ret = -TARGET_EINVAL;
12718                     break;
12719                 }
12720 
12721                 target_set = lock_user(VERIFY_READ, arg5,
12722                                        sizeof(target_sigset_t), 1);
12723                 if (!target_set) {
12724                     ret = -TARGET_EFAULT;
12725                     break;
12726                 }
12727                 target_to_host_sigset(set, target_set);
12728                 unlock_user(target_set, arg5, 0);
12729             } else {
12730                 set = NULL;
12731             }
12732 
12733             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12734                                              set, SIGSET_T_SIZE));
12735             break;
12736         }
12737 #endif
12738 #if defined(TARGET_NR_epoll_wait)
12739         case TARGET_NR_epoll_wait:
12740             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12741                                              NULL, 0));
12742             break;
12743 #endif
12744         default:
12745             ret = -TARGET_ENOSYS;
12746         }
12747         if (!is_error(ret)) {
12748             int i;
12749             for (i = 0; i < ret; i++) {
12750                 target_ep[i].events = tswap32(ep[i].events);
12751                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12752             }
12753             unlock_user(target_ep, arg2,
12754                         ret * sizeof(struct target_epoll_event));
12755         } else {
12756             unlock_user(target_ep, arg2, 0);
12757         }
12758         g_free(ep);
12759         return ret;
12760     }
12761 #endif
12762 #endif
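    /*
     * prlimit64: the new limit is not forwarded for RLIMIT_AS/DATA/STACK,
     * presumably so the guest cannot constrain the address space, data or
     * stack of the emulator process itself; the old limit is still read
     * back and returned to the guest.
     */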
12763 #ifdef TARGET_NR_prlimit64
12764     case TARGET_NR_prlimit64:
12765     {
12766         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12767         struct target_rlimit64 *target_rnew, *target_rold;
12768         struct host_rlimit64 rnew, rold, *rnewp = 0;
12769         int resource = target_to_host_resource(arg2);
12770 
12771         if (arg3 && (resource != RLIMIT_AS &&
12772                      resource != RLIMIT_DATA &&
12773                      resource != RLIMIT_STACK)) {
12774             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12775                 return -TARGET_EFAULT;
12776             }
12777             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12778             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12779             unlock_user_struct(target_rnew, arg3, 0);
12780             rnewp = &rnew;
12781         }
12782 
12783         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12784         if (!is_error(ret) && arg4) {
12785             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12786                 return -TARGET_EFAULT;
12787             }
12788             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12789             target_rold->rlim_max = tswap64(rold.rlim_max);
12790             unlock_user_struct(target_rold, arg4, 1);
12791         }
12792         return ret;
12793     }
12794 #endif
12795 #ifdef TARGET_NR_gethostname
12796     case TARGET_NR_gethostname:
12797     {
12798         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12799         if (name) {
12800             ret = get_errno(gethostname(name, arg2));
12801             unlock_user(name, arg1, arg2);
12802         } else {
12803             ret = -TARGET_EFAULT;
12804         }
12805         return ret;
12806     }
12807 #endif
12808 #ifdef TARGET_NR_atomic_cmpxchg_32
12809     case TARGET_NR_atomic_cmpxchg_32:
12810     {
12811         /* should use start_exclusive from main.c */
12812         abi_ulong mem_value;
12813         if (get_user_u32(mem_value, arg6)) {
12814             target_siginfo_t info;
12815             info.si_signo = SIGSEGV;
12816             info.si_errno = 0;
12817             info.si_code = TARGET_SEGV_MAPERR;
12818             info._sifields._sigfault._addr = arg6;
12819             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12820                          QEMU_SI_FAULT, &info);
            /* Don't fall through and compare an uninitialized mem_value. */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
12827     }
12828 #endif
12829 #ifdef TARGET_NR_atomic_barrier
12830     case TARGET_NR_atomic_barrier:
        /*
         * Like the kernel implementation and the qemu arm barrier,
         * this is implemented as a no-op.
         */
12833         return 0;
12834 #endif
12835 
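    /*
     * POSIX timers are tracked in the g_posix_timers table. The id returned
     * to the guest ORs TIMER_MAGIC with the table index (e.g.
     * TIMER_MAGIC | 3 names g_posix_timers[3]), which get_timer_id()
     * decodes again for the other timer_* syscalls below.
     */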
12836 #ifdef TARGET_NR_timer_create
12837     case TARGET_NR_timer_create:
12838     {
12839         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12840 
12841         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12842 
12843         int clkid = arg1;
12844         int timer_index = next_free_host_timer();
12845 
12846         if (timer_index < 0) {
12847             ret = -TARGET_EAGAIN;
12848         } else {
            timer_t *phtimer = g_posix_timers + timer_index;
12850 
12851             if (arg2) {
12852                 phost_sevp = &host_sevp;
12853                 ret = target_to_host_sigevent(phost_sevp, arg2);
12854                 if (ret != 0) {
12855                     return ret;
12856                 }
12857             }
12858 
12859             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12860             if (ret) {
12861                 phtimer = NULL;
12862             } else {
12863                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12864                     return -TARGET_EFAULT;
12865                 }
12866             }
12867         }
12868         return ret;
12869     }
12870 #endif
12871 
12872 #ifdef TARGET_NR_timer_settime
12873     case TARGET_NR_timer_settime:
12874     {
12875         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12876          * struct itimerspec * old_value */
12877         target_timer_t timerid = get_timer_id(arg1);
12878 
12879         if (timerid < 0) {
12880             ret = timerid;
12881         } else if (arg3 == 0) {
12882             ret = -TARGET_EINVAL;
12883         } else {
12884             timer_t htimer = g_posix_timers[timerid];
12885             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12886 
12887             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12888                 return -TARGET_EFAULT;
12889             }
12890             ret = get_errno(
12891                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12892             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12893                 return -TARGET_EFAULT;
12894             }
12895         }
12896         return ret;
12897     }
12898 #endif
12899 
12900 #ifdef TARGET_NR_timer_settime64
12901     case TARGET_NR_timer_settime64:
12902     {
12903         target_timer_t timerid = get_timer_id(arg1);
12904 
12905         if (timerid < 0) {
12906             ret = timerid;
12907         } else if (arg3 == 0) {
12908             ret = -TARGET_EINVAL;
12909         } else {
12910             timer_t htimer = g_posix_timers[timerid];
12911             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12912 
12913             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12914                 return -TARGET_EFAULT;
12915             }
12916             ret = get_errno(
12917                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12918             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12919                 return -TARGET_EFAULT;
12920             }
12921         }
12922         return ret;
12923     }
12924 #endif
12925 
12926 #ifdef TARGET_NR_timer_gettime
12927     case TARGET_NR_timer_gettime:
12928     {
12929         /* args: timer_t timerid, struct itimerspec *curr_value */
12930         target_timer_t timerid = get_timer_id(arg1);
12931 
12932         if (timerid < 0) {
12933             ret = timerid;
12934         } else if (!arg2) {
12935             ret = -TARGET_EFAULT;
12936         } else {
12937             timer_t htimer = g_posix_timers[timerid];
12938             struct itimerspec hspec;
12939             ret = get_errno(timer_gettime(htimer, &hspec));
12940 
12941             if (host_to_target_itimerspec(arg2, &hspec)) {
12942                 ret = -TARGET_EFAULT;
12943             }
12944         }
12945         return ret;
12946     }
12947 #endif
12948 
12949 #ifdef TARGET_NR_timer_gettime64
12950     case TARGET_NR_timer_gettime64:
12951     {
12952         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12953         target_timer_t timerid = get_timer_id(arg1);
12954 
12955         if (timerid < 0) {
12956             ret = timerid;
12957         } else if (!arg2) {
12958             ret = -TARGET_EFAULT;
12959         } else {
12960             timer_t htimer = g_posix_timers[timerid];
12961             struct itimerspec hspec;
12962             ret = get_errno(timer_gettime(htimer, &hspec));
12963 
12964             if (host_to_target_itimerspec64(arg2, &hspec)) {
12965                 ret = -TARGET_EFAULT;
12966             }
12967         }
12968         return ret;
12969     }
12970 #endif
12971 
12972 #ifdef TARGET_NR_timer_getoverrun
12973     case TARGET_NR_timer_getoverrun:
12974     {
12975         /* args: timer_t timerid */
12976         target_timer_t timerid = get_timer_id(arg1);
12977 
12978         if (timerid < 0) {
12979             ret = timerid;
12980         } else {
12981             timer_t htimer = g_posix_timers[timerid];
12982             ret = get_errno(timer_getoverrun(htimer));
12983         }
12984         return ret;
12985     }
12986 #endif
12987 
12988 #ifdef TARGET_NR_timer_delete
12989     case TARGET_NR_timer_delete:
12990     {
12991         /* args: timer_t timerid */
12992         target_timer_t timerid = get_timer_id(arg1);
12993 
12994         if (timerid < 0) {
12995             ret = timerid;
12996         } else {
12997             timer_t htimer = g_posix_timers[timerid];
12998             ret = get_errno(timer_delete(htimer));
12999             g_posix_timers[timerid] = 0;
13000         }
13001         return ret;
13002     }
13003 #endif
13004 
13005 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13006     case TARGET_NR_timerfd_create:
13007         return get_errno(timerfd_create(arg1,
13008                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13009 #endif
13010 
13011 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13012     case TARGET_NR_timerfd_gettime:
13013         {
13014             struct itimerspec its_curr;
13015 
13016             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13017 
13018             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13019                 return -TARGET_EFAULT;
13020             }
13021         }
13022         return ret;
13023 #endif
13024 
13025 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13026     case TARGET_NR_timerfd_gettime64:
13027         {
13028             struct itimerspec its_curr;
13029 
13030             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13031 
13032             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13033                 return -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037 #endif
13038 
13039 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13040     case TARGET_NR_timerfd_settime:
13041         {
13042             struct itimerspec its_new, its_old, *p_new;
13043 
13044             if (arg3) {
13045                 if (target_to_host_itimerspec(&its_new, arg3)) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 p_new = &its_new;
13049             } else {
13050                 p_new = NULL;
13051             }
13052 
13053             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13054 
13055             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13056                 return -TARGET_EFAULT;
13057             }
13058         }
13059         return ret;
13060 #endif
13061 
13062 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13063     case TARGET_NR_timerfd_settime64:
13064         {
13065             struct itimerspec its_new, its_old, *p_new;
13066 
13067             if (arg3) {
13068                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13069                     return -TARGET_EFAULT;
13070                 }
13071                 p_new = &its_new;
13072             } else {
13073                 p_new = NULL;
13074             }
13075 
13076             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13077 
13078             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13079                 return -TARGET_EFAULT;
13080             }
13081         }
13082         return ret;
13083 #endif
13084 
13085 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13086     case TARGET_NR_ioprio_get:
13087         return get_errno(ioprio_get(arg1, arg2));
13088 #endif
13089 
13090 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13091     case TARGET_NR_ioprio_set:
13092         return get_errno(ioprio_set(arg1, arg2, arg3));
13093 #endif
13094 
13095 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13096     case TARGET_NR_setns:
13097         return get_errno(setns(arg1, arg2));
13098 #endif
13099 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13100     case TARGET_NR_unshare:
13101         return get_errno(unshare(arg1));
13102 #endif
13103 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13104     case TARGET_NR_kcmp:
13105         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13106 #endif
13107 #ifdef TARGET_NR_swapcontext
13108     case TARGET_NR_swapcontext:
13109         /* PowerPC specific.  */
13110         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13111 #endif
13112 #ifdef TARGET_NR_memfd_create
13113     case TARGET_NR_memfd_create:
13114         p = lock_user_string(arg1);
13115         if (!p) {
13116             return -TARGET_EFAULT;
13117         }
13118         ret = get_errno(memfd_create(p, arg2));
13119         fd_trans_unregister(ret);
13120         unlock_user(p, arg1, 0);
13121         return ret;
13122 #endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13124     case TARGET_NR_membarrier:
13125         return get_errno(membarrier(arg1, arg2));
13126 #endif
13127 
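    /*
     * copy_file_range: the optional in/out offsets are read from guest
     * memory, passed by pointer to the host call, and copied back whenever
     * the call reports progress (ret > 0).
     */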
13128 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13129     case TARGET_NR_copy_file_range:
13130         {
13131             loff_t inoff, outoff;
13132             loff_t *pinoff = NULL, *poutoff = NULL;
13133 
13134             if (arg2) {
13135                 if (get_user_u64(inoff, arg2)) {
13136                     return -TARGET_EFAULT;
13137                 }
13138                 pinoff = &inoff;
13139             }
13140             if (arg4) {
13141                 if (get_user_u64(outoff, arg4)) {
13142                     return -TARGET_EFAULT;
13143                 }
13144                 poutoff = &outoff;
13145             }
13146             /* Do not sign-extend the count parameter. */
13147             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13148                                                  (abi_ulong)arg5, arg6));
13149             if (!is_error(ret) && ret > 0) {
13150                 if (arg2) {
13151                     if (put_user_u64(inoff, arg2)) {
13152                         return -TARGET_EFAULT;
13153                     }
13154                 }
13155                 if (arg4) {
13156                     if (put_user_u64(outoff, arg4)) {
13157                         return -TARGET_EFAULT;
13158                     }
13159                 }
13160             }
13161         }
13162         return ret;
13163 #endif
13164 
13165 #if defined(TARGET_NR_pivot_root)
13166     case TARGET_NR_pivot_root:
13167         {
13168             void *p2;
13169             p = lock_user_string(arg1); /* new_root */
13170             p2 = lock_user_string(arg2); /* put_old */
13171             if (!p || !p2) {
13172                 ret = -TARGET_EFAULT;
13173             } else {
13174                 ret = get_errno(pivot_root(p, p2));
13175             }
13176             unlock_user(p2, arg2, 0);
13177             unlock_user(p, arg1, 0);
13178         }
13179         return ret;
13180 #endif
13181 
13182     default:
13183         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13184         return -TARGET_ENOSYS;
13185     }
13186     return ret;
13187 }
13188 
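/*
 * do_syscall() is the entry point used by the per-architecture cpu main
 * loops. It records the syscall via record_syscall_start() /
 * record_syscall_return(), optionally prints strace-style logging when
 * LOG_STRACE is enabled, and delegates the real work to do_syscall1()
 * above. A call site in a cpu loop looks roughly like this (a sketch,
 * not the exact code of any particular target):
 *
 *     ret = do_syscall(env, num, a1, a2, a3, a4, a5, a6, 0, 0);
 *     if (ret == -QEMU_ERESTARTSYS) {
 *         env->pc -= 4;            // re-execute the syscall instruction
 *     } else if (ret != -QEMU_ESIGRETURN) {
 *         env->regs[0] = ret;      // return value back to the guest
 *     }
 */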
13189 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13190                     abi_long arg2, abi_long arg3, abi_long arg4,
13191                     abi_long arg5, abi_long arg6, abi_long arg7,
13192                     abi_long arg8)
13193 {
13194     CPUState *cpu = env_cpu(cpu_env);
13195     abi_long ret;
13196 
13197 #ifdef DEBUG_ERESTARTSYS
13198     /* Debug-only code for exercising the syscall-restart code paths
13199      * in the per-architecture cpu main loops: restart every syscall
13200      * the guest makes once before letting it through.
13201      */
13202     {
13203         static bool flag;
13204         flag = !flag;
13205         if (flag) {
13206             return -QEMU_ERESTARTSYS;
13207         }
13208     }
13209 #endif
13210 
13211     record_syscall_start(cpu, num, arg1,
13212                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13213 
13214     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13215         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13216     }
13217 
13218     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13219                       arg5, arg6, arg7, arg8);
13220 
13221     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13222         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13223                           arg3, arg4, arg5, arg6);
13224     }
13225 
13226     record_syscall_return(cpu, num, ret);
13227     return ret;
13228 }
13229