xref: /openbmc/qemu/linux-user/syscall.c (revision 87e9bf23)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_inotify_init __NR_inotify_init
276 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
277 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
278 #define __NR_sys_statx __NR_statx
279 
280 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
281 #define __NR__llseek __NR_lseek
282 #endif
283 
284 /* Newer kernel ports have llseek() instead of _llseek() */
285 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
286 #define TARGET_NR__llseek TARGET_NR_llseek
287 #endif
288 
289 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
290 #ifndef TARGET_O_NONBLOCK_MASK
291 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
292 #endif
293 
294 #define __NR_sys_gettid __NR_gettid
295 _syscall0(int, sys_gettid)
296 
297 /* For the 64-bit guest on 32-bit host case we must emulate
298  * getdents using getdents64, because otherwise the host
299  * might hand us back more dirent records than we can fit
300  * into the guest buffer after structure format conversion.
301  * Otherwise we emulate getdents with getdents if the host has it.
302  */
303 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
304 #define EMULATE_GETDENTS_WITH_GETDENTS
305 #endif
306 
307 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
308 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
309 #endif
310 #if (defined(TARGET_NR_getdents) && \
311       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
312     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
313 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
314 #endif
315 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
316 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
317           loff_t *, res, uint, wh);
318 #endif
319 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
320 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
321           siginfo_t *, uinfo)
322 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
323 #ifdef __NR_exit_group
324 _syscall1(int,exit_group,int,error_code)
325 #endif
326 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
327 _syscall1(int,set_tid_address,int *,tidptr)
328 #endif
329 #if defined(__NR_futex)
330 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #if defined(__NR_futex_time64)
334 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
335           const struct timespec *,timeout,int *,uaddr2,int,val3)
336 #endif
337 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
338 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
341 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
342           unsigned long *, user_mask_ptr);
343 #define __NR_sys_getcpu __NR_getcpu
344 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
345 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
346           void *, arg);
347 _syscall2(int, capget, struct __user_cap_header_struct *, header,
348           struct __user_cap_data_struct *, data);
349 _syscall2(int, capset, struct __user_cap_header_struct *, header,
350           struct __user_cap_data_struct *, data);
351 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
352 _syscall2(int, ioprio_get, int, which, int, who)
353 #endif
354 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
355 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
356 #endif
357 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
358 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
359 #endif
360 
361 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
362 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
363           unsigned long, idx1, unsigned long, idx2)
364 #endif
365 
366 /*
367  * It is assumed that struct statx is architecture independent.
368  */
369 #if defined(TARGET_NR_statx) && defined(__NR_statx)
370 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
371           unsigned int, mask, struct target_statx *, statxbuf)
372 #endif
373 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
374 _syscall2(int, membarrier, int, cmd, int, flags)
375 #endif
376 
377 static const bitmask_transtbl fcntl_flags_tbl[] = {
378   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
379   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
380   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
381   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
382   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
383   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
384   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
385   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
386   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
387   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
388   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
389   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
390   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
391 #if defined(O_DIRECT)
392   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
393 #endif
394 #if defined(O_NOATIME)
395   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
396 #endif
397 #if defined(O_CLOEXEC)
398   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
399 #endif
400 #if defined(O_PATH)
401   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
402 #endif
403 #if defined(O_TMPFILE)
404   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
405 #endif
406   /* Don't terminate the list prematurely on 64-bit host+guest.  */
407 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
408   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
409 #endif
410   { 0, 0, 0, 0 }
411 };
412 
413 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
414 
415 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
416 #if defined(__NR_utimensat)
417 #define __NR_sys_utimensat __NR_utimensat
418 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
419           const struct timespec *,tsp,int,flags)
420 #else
421 static int sys_utimensat(int dirfd, const char *pathname,
422                          const struct timespec times[2], int flags)
423 {
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_utimensat */
429 
430 #ifdef TARGET_NR_renameat2
431 #if defined(__NR_renameat2)
432 #define __NR_sys_renameat2 __NR_renameat2
433 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
434           const char *, new, unsigned int, flags)
435 #else
436 static int sys_renameat2(int oldfd, const char *old,
437                          int newfd, const char *new, int flags)
438 {
439     if (flags == 0) {
440         return renameat(oldfd, old, newfd, new);
441     }
442     errno = ENOSYS;
443     return -1;
444 }
445 #endif
446 #endif /* TARGET_NR_renameat2 */
447 
448 #ifdef CONFIG_INOTIFY
449 #include <sys/inotify.h>
450 
451 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
452 static int sys_inotify_init(void)
453 {
454   return (inotify_init());
455 }
456 #endif
457 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
458 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
459 {
460   return (inotify_add_watch(fd, pathname, mask));
461 }
462 #endif
463 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
464 static int sys_inotify_rm_watch(int fd, int32_t wd)
465 {
466   return (inotify_rm_watch(fd, wd));
467 }
468 #endif
469 #ifdef CONFIG_INOTIFY1
470 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
471 static int sys_inotify_init1(int flags)
472 {
473   return (inotify_init1(flags));
474 }
475 #endif
476 #endif
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be that used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, } ;
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k ;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
518 
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 #define safe_syscall0(type, name) \
562 static type safe_##name(void) \
563 { \
564     return safe_syscall(__NR_##name); \
565 }
566 
567 #define safe_syscall1(type, name, type1, arg1) \
568 static type safe_##name(type1 arg1) \
569 { \
570     return safe_syscall(__NR_##name, arg1); \
571 }
572 
573 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
574 static type safe_##name(type1 arg1, type2 arg2) \
575 { \
576     return safe_syscall(__NR_##name, arg1, arg2); \
577 }
578 
579 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
580 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
581 { \
582     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
583 }
584 
585 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
586     type4, arg4) \
587 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
588 { \
589     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
590 }
591 
592 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
593     type4, arg4, type5, arg5) \
594 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
595     type5 arg5) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
598 }
599 
600 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
601     type4, arg4, type5, arg5, type6, arg6) \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
603     type5 arg5, type6 arg6) \
604 { \
605     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
606 }
607 
608 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
609 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
610 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
611               int, flags, mode_t, mode)
612 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
613 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
614               struct rusage *, rusage)
615 #endif
616 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
617               int, options, struct rusage *, rusage)
618 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
619 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
620     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
621 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
622               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
623 #endif
624 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
625 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
626               struct timespec *, tsp, const sigset_t *, sigmask,
627               size_t, sigsetsize)
628 #endif
629 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
630               int, maxevents, int, timeout, const sigset_t *, sigmask,
631               size_t, sigsetsize)
632 #if defined(__NR_futex)
633 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
634               const struct timespec *,timeout,int *,uaddr2,int,val3)
635 #endif
636 #if defined(__NR_futex_time64)
637 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
638               const struct timespec *,timeout,int *,uaddr2,int,val3)
639 #endif
640 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
641 safe_syscall2(int, kill, pid_t, pid, int, sig)
642 safe_syscall2(int, tkill, int, tid, int, sig)
643 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
644 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
645 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
646 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
647               unsigned long, pos_l, unsigned long, pos_h)
648 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
649               unsigned long, pos_l, unsigned long, pos_h)
650 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
651               socklen_t, addrlen)
652 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
653               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
654 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
655               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
656 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
657 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
658 safe_syscall2(int, flock, int, fd, int, operation)
659 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
660 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
661               const struct timespec *, uts, size_t, sigsetsize)
662 #endif
663 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
664               int, flags)
665 #if defined(TARGET_NR_nanosleep)
666 safe_syscall2(int, nanosleep, const struct timespec *, req,
667               struct timespec *, rem)
668 #endif
669 #if defined(TARGET_NR_clock_nanosleep) || \
670     defined(TARGET_NR_clock_nanosleep_time64)
671 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
672               const struct timespec *, req, struct timespec *, rem)
673 #endif
674 #ifdef __NR_ipc
675 #ifdef __s390x__
676 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr)
678 #else
679 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
680               void *, ptr, long, fifth)
681 #endif
682 #endif
683 #ifdef __NR_msgsnd
684 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
685               int, flags)
686 #endif
687 #ifdef __NR_msgrcv
688 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
689               long, msgtype, int, flags)
690 #endif
691 #ifdef __NR_semtimedop
692 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
693               unsigned, nsops, const struct timespec *, timeout)
694 #endif
695 #if defined(TARGET_NR_mq_timedsend) || \
696     defined(TARGET_NR_mq_timedsend_time64)
697 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
698               size_t, len, unsigned, prio, const struct timespec *, timeout)
699 #endif
700 #if defined(TARGET_NR_mq_timedreceive) || \
701     defined(TARGET_NR_mq_timedreceive_time64)
702 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
703               size_t, len, unsigned *, prio, const struct timespec *, timeout)
704 #endif
705 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
706 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
707               int, outfd, loff_t *, poutoff, size_t, length,
708               unsigned int, flags)
709 #endif
710 
711 /* We do ioctl like this rather than via safe_syscall3 to preserve the
712  * "third argument might be integer or pointer or not present" behaviour of
713  * the libc function.
714  */
715 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
716 /* Similarly for fcntl. Note that callers must always:
717  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
718  *  use the flock64 struct rather than unsuffixed flock
719  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
720  */
721 #ifdef __NR_fcntl64
722 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
723 #else
724 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
725 #endif
726 
727 static inline int host_to_target_sock_type(int host_type)
728 {
729     int target_type;
730 
731     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
732     case SOCK_DGRAM:
733         target_type = TARGET_SOCK_DGRAM;
734         break;
735     case SOCK_STREAM:
736         target_type = TARGET_SOCK_STREAM;
737         break;
738     default:
739         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
740         break;
741     }
742 
743 #if defined(SOCK_CLOEXEC)
744     if (host_type & SOCK_CLOEXEC) {
745         target_type |= TARGET_SOCK_CLOEXEC;
746     }
747 #endif
748 
749 #if defined(SOCK_NONBLOCK)
750     if (host_type & SOCK_NONBLOCK) {
751         target_type |= TARGET_SOCK_NONBLOCK;
752     }
753 #endif
754 
755     return target_type;
756 }
757 
758 static abi_ulong target_brk;
759 static abi_ulong target_original_brk;
760 static abi_ulong brk_page;
761 
762 void target_set_brk(abi_ulong new_brk)
763 {
764     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
765     brk_page = HOST_PAGE_ALIGN(target_brk);
766 }
767 
768 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
769 #define DEBUGF_BRK(message, args...)
770 
771 /* do_brk() must return target values and target errnos. */
772 abi_long do_brk(abi_ulong new_brk)
773 {
774     abi_long mapped_addr;
775     abi_ulong new_alloc_size;
776 
777     /* brk pointers are always untagged */
778 
779     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
780 
781     if (!new_brk) {
782         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
783         return target_brk;
784     }
785     if (new_brk < target_original_brk) {
786         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
787                    target_brk);
788         return target_brk;
789     }
790 
791     /* If the new brk is less than the highest page reserved to the
792      * target heap allocation, set it and we're almost done...  */
793     if (new_brk <= brk_page) {
794         /* Heap contents are initialized to zero, as for anonymous
795          * mapped pages.  */
796         if (new_brk > target_brk) {
797             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
798         }
799 	target_brk = new_brk;
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
801 	return target_brk;
802     }
803 
804     /* We need to allocate more memory after the brk... Note that
805      * we don't use MAP_FIXED because that will map over the top of
806      * any existing mapping (like the one with the host libc or qemu
807      * itself); instead we treat "mapped but at wrong address" as
808      * a failure and unmap again.
809      */
810     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
811     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
812                                         PROT_READ|PROT_WRITE,
813                                         MAP_ANON|MAP_PRIVATE, 0, 0));
814 
815     if (mapped_addr == brk_page) {
816         /* Heap contents are initialized to zero, as for anonymous
817          * mapped pages.  Technically the new pages are already
818          * initialized to zero since they *are* anonymous mapped
819          * pages, however we have to take care with the contents that
820          * come from the remaining part of the previous page: it may
821          * contains garbage data due to a previous heap usage (grown
822          * then shrunken).  */
823         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
824 
825         target_brk = new_brk;
826         brk_page = HOST_PAGE_ALIGN(target_brk);
827         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
828             target_brk);
829         return target_brk;
830     } else if (mapped_addr != -1) {
831         /* Mapped but at wrong address, meaning there wasn't actually
832          * enough space for this brk.
833          */
834         target_munmap(mapped_addr, new_alloc_size);
835         mapped_addr = -1;
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
837     }
838     else {
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
840     }
841 
842 #if defined(TARGET_ALPHA)
843     /* We (partially) emulate OSF/1 on Alpha, which requires we
844        return a proper errno, not an unchanged brk value.  */
845     return -TARGET_ENOMEM;
846 #endif
847     /* For everything else, return the previous break. */
848     return target_brk;
849 }
850 
851 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
852     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
853 static inline abi_long copy_from_user_fdset(fd_set *fds,
854                                             abi_ulong target_fds_addr,
855                                             int n)
856 {
857     int i, nw, j, k;
858     abi_ulong b, *target_fds;
859 
860     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
861     if (!(target_fds = lock_user(VERIFY_READ,
862                                  target_fds_addr,
863                                  sizeof(abi_ulong) * nw,
864                                  1)))
865         return -TARGET_EFAULT;
866 
867     FD_ZERO(fds);
868     k = 0;
869     for (i = 0; i < nw; i++) {
870         /* grab the abi_ulong */
871         __get_user(b, &target_fds[i]);
872         for (j = 0; j < TARGET_ABI_BITS; j++) {
873             /* check the bit inside the abi_ulong */
874             if ((b >> j) & 1)
875                 FD_SET(k, fds);
876             k++;
877         }
878     }
879 
880     unlock_user(target_fds, target_fds_addr, 0);
881 
882     return 0;
883 }
884 
885 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
886                                                  abi_ulong target_fds_addr,
887                                                  int n)
888 {
889     if (target_fds_addr) {
890         if (copy_from_user_fdset(fds, target_fds_addr, n))
891             return -TARGET_EFAULT;
892         *fds_ptr = fds;
893     } else {
894         *fds_ptr = NULL;
895     }
896     return 0;
897 }
898 
899 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
900                                           const fd_set *fds,
901                                           int n)
902 {
903     int i, nw, j, k;
904     abi_long v;
905     abi_ulong *target_fds;
906 
907     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
908     if (!(target_fds = lock_user(VERIFY_WRITE,
909                                  target_fds_addr,
910                                  sizeof(abi_ulong) * nw,
911                                  0)))
912         return -TARGET_EFAULT;
913 
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         v = 0;
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
919             k++;
920         }
921         __put_user(v, &target_fds[i]);
922     }
923 
924     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
925 
926     return 0;
927 }
928 #endif
929 
930 #if defined(__alpha__)
931 #define HOST_HZ 1024
932 #else
933 #define HOST_HZ 100
934 #endif
935 
936 static inline abi_long host_to_target_clock_t(long ticks)
937 {
938 #if HOST_HZ == TARGET_HZ
939     return ticks;
940 #else
941     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
942 #endif
943 }
944 
945 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
946                                              const struct rusage *rusage)
947 {
948     struct target_rusage *target_rusage;
949 
950     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
951         return -TARGET_EFAULT;
952     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
953     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
954     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
955     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
956     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
957     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
958     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
959     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
960     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
961     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
962     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
963     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
964     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
965     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
966     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
967     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
968     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
969     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
970     unlock_user_struct(target_rusage, target_addr, 1);
971 
972     return 0;
973 }
974 
975 #ifdef TARGET_NR_setrlimit
976 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
977 {
978     abi_ulong target_rlim_swap;
979     rlim_t result;
980 
981     target_rlim_swap = tswapal(target_rlim);
982     if (target_rlim_swap == TARGET_RLIM_INFINITY)
983         return RLIM_INFINITY;
984 
985     result = target_rlim_swap;
986     if (target_rlim_swap != (rlim_t)result)
987         return RLIM_INFINITY;
988 
989     return result;
990 }
991 #endif
992 
993 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
994 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
995 {
996     abi_ulong target_rlim_swap;
997     abi_ulong result;
998 
999     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1000         target_rlim_swap = TARGET_RLIM_INFINITY;
1001     else
1002         target_rlim_swap = rlim;
1003     result = tswapal(target_rlim_swap);
1004 
1005     return result;
1006 }
1007 #endif
1008 
1009 static inline int target_to_host_resource(int code)
1010 {
1011     switch (code) {
1012     case TARGET_RLIMIT_AS:
1013         return RLIMIT_AS;
1014     case TARGET_RLIMIT_CORE:
1015         return RLIMIT_CORE;
1016     case TARGET_RLIMIT_CPU:
1017         return RLIMIT_CPU;
1018     case TARGET_RLIMIT_DATA:
1019         return RLIMIT_DATA;
1020     case TARGET_RLIMIT_FSIZE:
1021         return RLIMIT_FSIZE;
1022     case TARGET_RLIMIT_LOCKS:
1023         return RLIMIT_LOCKS;
1024     case TARGET_RLIMIT_MEMLOCK:
1025         return RLIMIT_MEMLOCK;
1026     case TARGET_RLIMIT_MSGQUEUE:
1027         return RLIMIT_MSGQUEUE;
1028     case TARGET_RLIMIT_NICE:
1029         return RLIMIT_NICE;
1030     case TARGET_RLIMIT_NOFILE:
1031         return RLIMIT_NOFILE;
1032     case TARGET_RLIMIT_NPROC:
1033         return RLIMIT_NPROC;
1034     case TARGET_RLIMIT_RSS:
1035         return RLIMIT_RSS;
1036     case TARGET_RLIMIT_RTPRIO:
1037         return RLIMIT_RTPRIO;
1038     case TARGET_RLIMIT_SIGPENDING:
1039         return RLIMIT_SIGPENDING;
1040     case TARGET_RLIMIT_STACK:
1041         return RLIMIT_STACK;
1042     default:
1043         return code;
1044     }
1045 }
1046 
1047 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1048                                               abi_ulong target_tv_addr)
1049 {
1050     struct target_timeval *target_tv;
1051 
1052     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1053         return -TARGET_EFAULT;
1054     }
1055 
1056     __get_user(tv->tv_sec, &target_tv->tv_sec);
1057     __get_user(tv->tv_usec, &target_tv->tv_usec);
1058 
1059     unlock_user_struct(target_tv, target_tv_addr, 0);
1060 
1061     return 0;
1062 }
1063 
1064 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1065                                             const struct timeval *tv)
1066 {
1067     struct target_timeval *target_tv;
1068 
1069     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1070         return -TARGET_EFAULT;
1071     }
1072 
1073     __put_user(tv->tv_sec, &target_tv->tv_sec);
1074     __put_user(tv->tv_usec, &target_tv->tv_usec);
1075 
1076     unlock_user_struct(target_tv, target_tv_addr, 1);
1077 
1078     return 0;
1079 }
1080 
1081 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1082 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1083                                                 abi_ulong target_tv_addr)
1084 {
1085     struct target__kernel_sock_timeval *target_tv;
1086 
1087     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1088         return -TARGET_EFAULT;
1089     }
1090 
1091     __get_user(tv->tv_sec, &target_tv->tv_sec);
1092     __get_user(tv->tv_usec, &target_tv->tv_usec);
1093 
1094     unlock_user_struct(target_tv, target_tv_addr, 0);
1095 
1096     return 0;
1097 }
1098 #endif
1099 
1100 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1101                                               const struct timeval *tv)
1102 {
1103     struct target__kernel_sock_timeval *target_tv;
1104 
1105     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1106         return -TARGET_EFAULT;
1107     }
1108 
1109     __put_user(tv->tv_sec, &target_tv->tv_sec);
1110     __put_user(tv->tv_usec, &target_tv->tv_usec);
1111 
1112     unlock_user_struct(target_tv, target_tv_addr, 1);
1113 
1114     return 0;
1115 }
1116 
1117 #if defined(TARGET_NR_futex) || \
1118     defined(TARGET_NR_rt_sigtimedwait) || \
1119     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1120     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1121     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1122     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1123     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1124     defined(TARGET_NR_timer_settime) || \
1125     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1126 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1127                                                abi_ulong target_addr)
1128 {
1129     struct target_timespec *target_ts;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1135     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1136     unlock_user_struct(target_ts, target_addr, 0);
1137     return 0;
1138 }
1139 #endif
1140 
1141 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1142     defined(TARGET_NR_timer_settime64) || \
1143     defined(TARGET_NR_mq_timedsend_time64) || \
1144     defined(TARGET_NR_mq_timedreceive_time64) || \
1145     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1146     defined(TARGET_NR_clock_nanosleep_time64) || \
1147     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1148     defined(TARGET_NR_utimensat) || \
1149     defined(TARGET_NR_utimensat_time64) || \
1150     defined(TARGET_NR_semtimedop_time64) || \
1151     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1152 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1153                                                  abi_ulong target_addr)
1154 {
1155     struct target__kernel_timespec *target_ts;
1156 
1157     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1158         return -TARGET_EFAULT;
1159     }
1160     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1161     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1162     /* in 32bit mode, this drops the padding */
1163     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1164     unlock_user_struct(target_ts, target_addr, 0);
1165     return 0;
1166 }
1167 #endif
1168 
1169 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1170                                                struct timespec *host_ts)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 1);
1180     return 0;
1181 }
1182 
1183 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1184                                                  struct timespec *host_ts)
1185 {
1186     struct target__kernel_timespec *target_ts;
1187 
1188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189         return -TARGET_EFAULT;
1190     }
1191     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193     unlock_user_struct(target_ts, target_addr, 1);
1194     return 0;
1195 }
1196 
1197 #if defined(TARGET_NR_gettimeofday)
1198 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1199                                              struct timezone *tz)
1200 {
1201     struct target_timezone *target_tz;
1202 
1203     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1204         return -TARGET_EFAULT;
1205     }
1206 
1207     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1208     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1209 
1210     unlock_user_struct(target_tz, target_tz_addr, 1);
1211 
1212     return 0;
1213 }
1214 #endif
1215 
1216 #if defined(TARGET_NR_settimeofday)
1217 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1218                                                abi_ulong target_tz_addr)
1219 {
1220     struct target_timezone *target_tz;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225 
1226     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1227     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1228 
1229     unlock_user_struct(target_tz, target_tz_addr, 0);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1236 #include <mqueue.h>
1237 
1238 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1239                                               abi_ulong target_mq_attr_addr)
1240 {
1241     struct target_mq_attr *target_mq_attr;
1242 
1243     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1244                           target_mq_attr_addr, 1))
1245         return -TARGET_EFAULT;
1246 
1247     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1248     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1249     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1250     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1251 
1252     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1253 
1254     return 0;
1255 }
1256 
1257 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1258                                             const struct mq_attr *attr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1263                           target_mq_attr_addr, 0))
1264         return -TARGET_EFAULT;
1265 
1266     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1272 
1273     return 0;
1274 }
1275 #endif
1276 
1277 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1278 /* do_select() must return target values and target errnos. */
1279 static abi_long do_select(int n,
1280                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1281                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1282 {
1283     fd_set rfds, wfds, efds;
1284     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1285     struct timeval tv;
1286     struct timespec ts, *ts_ptr;
1287     abi_long ret;
1288 
1289     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301 
1302     if (target_tv_addr) {
1303         if (copy_from_user_timeval(&tv, target_tv_addr))
1304             return -TARGET_EFAULT;
1305         ts.tv_sec = tv.tv_sec;
1306         ts.tv_nsec = tv.tv_usec * 1000;
1307         ts_ptr = &ts;
1308     } else {
1309         ts_ptr = NULL;
1310     }
1311 
1312     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1313                                   ts_ptr, NULL));
1314 
1315     if (!is_error(ret)) {
1316         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1317             return -TARGET_EFAULT;
1318         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1319             return -TARGET_EFAULT;
1320         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1321             return -TARGET_EFAULT;
1322 
1323         if (target_tv_addr) {
1324             tv.tv_sec = ts.tv_sec;
1325             tv.tv_usec = ts.tv_nsec / 1000;
1326             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1327                 return -TARGET_EFAULT;
1328             }
1329         }
1330     }
1331 
1332     return ret;
1333 }
1334 
1335 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1336 static abi_long do_old_select(abi_ulong arg1)
1337 {
1338     struct target_sel_arg_struct *sel;
1339     abi_ulong inp, outp, exp, tvp;
1340     long nsel;
1341 
1342     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     nsel = tswapal(sel->n);
1347     inp = tswapal(sel->inp);
1348     outp = tswapal(sel->outp);
1349     exp = tswapal(sel->exp);
1350     tvp = tswapal(sel->tvp);
1351 
1352     unlock_user_struct(sel, arg1, 0);
1353 
1354     return do_select(nsel, inp, outp, exp, tvp);
1355 }
1356 #endif
1357 #endif
1358 
1359 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1360 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1361                             abi_long arg4, abi_long arg5, abi_long arg6,
1362                             bool time64)
1363 {
1364     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1365     fd_set rfds, wfds, efds;
1366     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1367     struct timespec ts, *ts_ptr;
1368     abi_long ret;
1369 
1370     /*
1371      * The 6th arg is actually two args smashed together,
1372      * so we cannot use the C library.
1373      */
1374     sigset_t set;
1375     struct {
1376         sigset_t *set;
1377         size_t size;
1378     } sig, *sig_ptr;
1379 
1380     abi_ulong arg_sigset, arg_sigsize, *arg7;
1381     target_sigset_t *target_sigset;
1382 
1383     n = arg1;
1384     rfd_addr = arg2;
1385     wfd_addr = arg3;
1386     efd_addr = arg4;
1387     ts_addr = arg5;
1388 
1389     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1398     if (ret) {
1399         return ret;
1400     }
1401 
1402     /*
1403      * This takes a timespec, and not a timeval, so we cannot
1404      * use the do_select() helper ...
1405      */
1406     if (ts_addr) {
1407         if (time64) {
1408             if (target_to_host_timespec64(&ts, ts_addr)) {
1409                 return -TARGET_EFAULT;
1410             }
1411         } else {
1412             if (target_to_host_timespec(&ts, ts_addr)) {
1413                 return -TARGET_EFAULT;
1414             }
1415         }
1416             ts_ptr = &ts;
1417     } else {
1418         ts_ptr = NULL;
1419     }
1420 
1421     /* Extract the two packed args for the sigset */
1422     if (arg6) {
1423         sig_ptr = &sig;
1424         sig.size = SIGSET_T_SIZE;
1425 
1426         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1427         if (!arg7) {
1428             return -TARGET_EFAULT;
1429         }
1430         arg_sigset = tswapal(arg7[0]);
1431         arg_sigsize = tswapal(arg7[1]);
1432         unlock_user(arg7, arg6, 0);
1433 
1434         if (arg_sigset) {
1435             sig.set = &set;
1436             if (arg_sigsize != sizeof(*target_sigset)) {
1437                 /* Like the kernel, we enforce correct size sigsets */
1438                 return -TARGET_EINVAL;
1439             }
1440             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1441                                       sizeof(*target_sigset), 1);
1442             if (!target_sigset) {
1443                 return -TARGET_EFAULT;
1444             }
1445             target_to_host_sigset(&set, target_sigset);
1446             unlock_user(target_sigset, arg_sigset, 0);
1447         } else {
1448             sig.set = NULL;
1449         }
1450     } else {
1451         sig_ptr = NULL;
1452     }
1453 
1454     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                   ts_ptr, sig_ptr));
1456 
1457     if (!is_error(ret)) {
1458         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1465             return -TARGET_EFAULT;
1466         }
1467         if (time64) {
1468             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1469                 return -TARGET_EFAULT;
1470             }
1471         } else {
1472             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1473                 return -TARGET_EFAULT;
1474             }
1475         }
1476     }
1477     return ret;
1478 }
1479 #endif
1480 
1481 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1482     defined(TARGET_NR_ppoll_time64)
1483 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1484                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1485 {
1486     struct target_pollfd *target_pfd;
1487     unsigned int nfds = arg2;
1488     struct pollfd *pfd;
1489     unsigned int i;
1490     abi_long ret;
1491 
1492     pfd = NULL;
1493     target_pfd = NULL;
1494     if (nfds) {
1495         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1496             return -TARGET_EINVAL;
1497         }
1498         target_pfd = lock_user(VERIFY_WRITE, arg1,
1499                                sizeof(struct target_pollfd) * nfds, 1);
1500         if (!target_pfd) {
1501             return -TARGET_EFAULT;
1502         }
1503 
1504         pfd = alloca(sizeof(struct pollfd) * nfds);
1505         for (i = 0; i < nfds; i++) {
1506             pfd[i].fd = tswap32(target_pfd[i].fd);
1507             pfd[i].events = tswap16(target_pfd[i].events);
1508         }
1509     }
1510     if (ppoll) {
1511         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1512         target_sigset_t *target_set;
1513         sigset_t _set, *set = &_set;
1514 
1515         if (arg3) {
1516             if (time64) {
1517                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1518                     unlock_user(target_pfd, arg1, 0);
1519                     return -TARGET_EFAULT;
1520                 }
1521             } else {
1522                 if (target_to_host_timespec(timeout_ts, arg3)) {
1523                     unlock_user(target_pfd, arg1, 0);
1524                     return -TARGET_EFAULT;
1525                 }
1526             }
1527         } else {
1528             timeout_ts = NULL;
1529         }
1530 
1531         if (arg4) {
1532             if (arg5 != sizeof(target_sigset_t)) {
1533                 unlock_user(target_pfd, arg1, 0);
1534                 return -TARGET_EINVAL;
1535             }
1536 
1537             target_set = lock_user(VERIFY_READ, arg4,
1538                                    sizeof(target_sigset_t), 1);
1539             if (!target_set) {
1540                 unlock_user(target_pfd, arg1, 0);
1541                 return -TARGET_EFAULT;
1542             }
1543             target_to_host_sigset(set, target_set);
1544         } else {
1545             set = NULL;
1546         }
1547 
1548         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1549                                    set, SIGSET_T_SIZE));
1550 
1551         if (!is_error(ret) && arg3) {
1552             if (time64) {
1553                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             } else {
1557                 if (host_to_target_timespec(arg3, timeout_ts)) {
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         }
1562         if (arg4) {
1563             unlock_user(target_set, arg4, 0);
1564         }
1565     } else {
1566           struct timespec ts, *pts;
1567 
1568           if (arg3 >= 0) {
1569               /* Convert ms to secs, ns */
1570               ts.tv_sec = arg3 / 1000;
1571               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1572               pts = &ts;
1573           } else {
1574               /* -ve poll() timeout means "infinite" */
1575               pts = NULL;
1576           }
1577           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1578     }
1579 
1580     if (!is_error(ret)) {
1581         for (i = 0; i < nfds; i++) {
1582             target_pfd[i].revents = tswap16(pfd[i].revents);
1583         }
1584     }
1585     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1586     return ret;
1587 }
1588 #endif
1589 
1590 static abi_long do_pipe2(int host_pipe[], int flags)
1591 {
1592 #ifdef CONFIG_PIPE2
1593     return pipe2(host_pipe, flags);
1594 #else
1595     return -ENOSYS;
1596 #endif
1597 }
1598 
1599 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1600                         int flags, int is_pipe2)
1601 {
1602     int host_pipe[2];
1603     abi_long ret;
1604     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1605 
1606     if (is_error(ret))
1607         return get_errno(ret);
1608 
1609     /* Several targets have special calling conventions for the original
1610        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1611     if (!is_pipe2) {
1612 #if defined(TARGET_ALPHA)
1613         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_MIPS)
1616         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SH4)
1619         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #elif defined(TARGET_SPARC)
1622         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1623         return host_pipe[0];
1624 #endif
1625     }
1626 
1627     if (put_user_s32(host_pipe[0], pipedes)
1628         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1629         return -TARGET_EFAULT;
1630     return get_errno(ret);
1631 }
1632 
1633 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1634                                               abi_ulong target_addr,
1635                                               socklen_t len)
1636 {
1637     struct target_ip_mreqn *target_smreqn;
1638 
1639     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1640     if (!target_smreqn)
1641         return -TARGET_EFAULT;
1642     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1643     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1644     if (len == sizeof(struct target_ip_mreqn))
1645         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1646     unlock_user(target_smreqn, target_addr, 0);
1647 
1648     return 0;
1649 }
1650 
1651 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1652                                                abi_ulong target_addr,
1653                                                socklen_t len)
1654 {
1655     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1656     sa_family_t sa_family;
1657     struct target_sockaddr *target_saddr;
1658 
1659     if (fd_trans_target_to_host_addr(fd)) {
1660         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1661     }
1662 
1663     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1664     if (!target_saddr)
1665         return -TARGET_EFAULT;
1666 
1667     sa_family = tswap16(target_saddr->sa_family);
1668 
1669     /* Oops. The caller might send a incomplete sun_path; sun_path
1670      * must be terminated by \0 (see the manual page), but
1671      * unfortunately it is quite common to specify sockaddr_un
1672      * length as "strlen(x->sun_path)" while it should be
1673      * "strlen(...) + 1". We'll fix that here if needed.
1674      * Linux kernel has a similar feature.
1675      */
1676 
1677     if (sa_family == AF_UNIX) {
1678         if (len < unix_maxlen && len > 0) {
1679             char *cp = (char*)target_saddr;
1680 
1681             if ( cp[len-1] && !cp[len] )
1682                 len++;
1683         }
1684         if (len > unix_maxlen)
1685             len = unix_maxlen;
1686     }
1687 
1688     memcpy(addr, target_saddr, len);
1689     addr->sa_family = sa_family;
1690     if (sa_family == AF_NETLINK) {
1691         struct sockaddr_nl *nladdr;
1692 
1693         nladdr = (struct sockaddr_nl *)addr;
1694         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1695         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1696     } else if (sa_family == AF_PACKET) {
1697 	struct target_sockaddr_ll *lladdr;
1698 
1699 	lladdr = (struct target_sockaddr_ll *)addr;
1700 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1701 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1702     }
1703     unlock_user(target_saddr, target_addr, 0);
1704 
1705     return 0;
1706 }
1707 
1708 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1709                                                struct sockaddr *addr,
1710                                                socklen_t len)
1711 {
1712     struct target_sockaddr *target_saddr;
1713 
1714     if (len == 0) {
1715         return 0;
1716     }
1717     assert(addr);
1718 
1719     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722     memcpy(target_saddr, addr, len);
1723     if (len >= offsetof(struct target_sockaddr, sa_family) +
1724         sizeof(target_saddr->sa_family)) {
1725         target_saddr->sa_family = tswap16(addr->sa_family);
1726     }
1727     if (addr->sa_family == AF_NETLINK &&
1728         len >= sizeof(struct target_sockaddr_nl)) {
1729         struct target_sockaddr_nl *target_nl =
1730                (struct target_sockaddr_nl *)target_saddr;
1731         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1732         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1733     } else if (addr->sa_family == AF_PACKET) {
1734         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1735         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1736         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1737     } else if (addr->sa_family == AF_INET6 &&
1738                len >= sizeof(struct target_sockaddr_in6)) {
1739         struct target_sockaddr_in6 *target_in6 =
1740                (struct target_sockaddr_in6 *)target_saddr;
1741         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1742     }
1743     unlock_user(target_saddr, target_addr, len);
1744 
1745     return 0;
1746 }
1747 
1748 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1749                                            struct target_msghdr *target_msgh)
1750 {
1751     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1752     abi_long msg_controllen;
1753     abi_ulong target_cmsg_addr;
1754     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1755     socklen_t space = 0;
1756 
1757     msg_controllen = tswapal(target_msgh->msg_controllen);
1758     if (msg_controllen < sizeof (struct target_cmsghdr))
1759         goto the_end;
1760     target_cmsg_addr = tswapal(target_msgh->msg_control);
1761     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1762     target_cmsg_start = target_cmsg;
1763     if (!target_cmsg)
1764         return -TARGET_EFAULT;
1765 
1766     while (cmsg && target_cmsg) {
1767         void *data = CMSG_DATA(cmsg);
1768         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1769 
1770         int len = tswapal(target_cmsg->cmsg_len)
1771             - sizeof(struct target_cmsghdr);
1772 
1773         space += CMSG_SPACE(len);
1774         if (space > msgh->msg_controllen) {
1775             space -= CMSG_SPACE(len);
1776             /* This is a QEMU bug, since we allocated the payload
1777              * area ourselves (unlike overflow in host-to-target
1778              * conversion, which is just the guest giving us a buffer
1779              * that's too small). It can't happen for the payload types
1780              * we currently support; if it becomes an issue in future
1781              * we would need to improve our allocation strategy to
1782              * something more intelligent than "twice the size of the
1783              * target buffer we're reading from".
1784              */
1785             qemu_log_mask(LOG_UNIMP,
1786                           ("Unsupported ancillary data %d/%d: "
1787                            "unhandled msg size\n"),
1788                           tswap32(target_cmsg->cmsg_level),
1789                           tswap32(target_cmsg->cmsg_type));
1790             break;
1791         }
1792 
1793         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1794             cmsg->cmsg_level = SOL_SOCKET;
1795         } else {
1796             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1797         }
1798         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1799         cmsg->cmsg_len = CMSG_LEN(len);
1800 
1801         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1802             int *fd = (int *)data;
1803             int *target_fd = (int *)target_data;
1804             int i, numfds = len / sizeof(int);
1805 
1806             for (i = 0; i < numfds; i++) {
1807                 __get_user(fd[i], target_fd + i);
1808             }
1809         } else if (cmsg->cmsg_level == SOL_SOCKET
1810                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1811             struct ucred *cred = (struct ucred *)data;
1812             struct target_ucred *target_cred =
1813                 (struct target_ucred *)target_data;
1814 
1815             __get_user(cred->pid, &target_cred->pid);
1816             __get_user(cred->uid, &target_cred->uid);
1817             __get_user(cred->gid, &target_cred->gid);
1818         } else {
1819             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1820                           cmsg->cmsg_level, cmsg->cmsg_type);
1821             memcpy(data, target_data, len);
1822         }
1823 
1824         cmsg = CMSG_NXTHDR(msgh, cmsg);
1825         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1826                                          target_cmsg_start);
1827     }
1828     unlock_user(target_cmsg, target_cmsg_addr, 0);
1829  the_end:
1830     msgh->msg_controllen = space;
1831     return 0;
1832 }
1833 
1834 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1835                                            struct msghdr *msgh)
1836 {
1837     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1838     abi_long msg_controllen;
1839     abi_ulong target_cmsg_addr;
1840     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1841     socklen_t space = 0;
1842 
1843     msg_controllen = tswapal(target_msgh->msg_controllen);
1844     if (msg_controllen < sizeof (struct target_cmsghdr))
1845         goto the_end;
1846     target_cmsg_addr = tswapal(target_msgh->msg_control);
1847     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1848     target_cmsg_start = target_cmsg;
1849     if (!target_cmsg)
1850         return -TARGET_EFAULT;
1851 
1852     while (cmsg && target_cmsg) {
1853         void *data = CMSG_DATA(cmsg);
1854         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1855 
1856         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1857         int tgt_len, tgt_space;
1858 
1859         /* We never copy a half-header but may copy half-data;
1860          * this is Linux's behaviour in put_cmsg(). Note that
1861          * truncation here is a guest problem (which we report
1862          * to the guest via the CTRUNC bit), unlike truncation
1863          * in target_to_host_cmsg, which is a QEMU bug.
1864          */
1865         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1866             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1867             break;
1868         }
1869 
1870         if (cmsg->cmsg_level == SOL_SOCKET) {
1871             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1872         } else {
1873             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1874         }
1875         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1876 
1877         /* Payload types which need a different size of payload on
1878          * the target must adjust tgt_len here.
1879          */
1880         tgt_len = len;
1881         switch (cmsg->cmsg_level) {
1882         case SOL_SOCKET:
1883             switch (cmsg->cmsg_type) {
1884             case SO_TIMESTAMP:
1885                 tgt_len = sizeof(struct target_timeval);
1886                 break;
1887             default:
1888                 break;
1889             }
1890             break;
1891         default:
1892             break;
1893         }
1894 
1895         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1896             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1897             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1898         }
1899 
1900         /* We must now copy-and-convert len bytes of payload
1901          * into tgt_len bytes of destination space. Bear in mind
1902          * that in both source and destination we may be dealing
1903          * with a truncated value!
1904          */
1905         switch (cmsg->cmsg_level) {
1906         case SOL_SOCKET:
1907             switch (cmsg->cmsg_type) {
1908             case SCM_RIGHTS:
1909             {
1910                 int *fd = (int *)data;
1911                 int *target_fd = (int *)target_data;
1912                 int i, numfds = tgt_len / sizeof(int);
1913 
1914                 for (i = 0; i < numfds; i++) {
1915                     __put_user(fd[i], target_fd + i);
1916                 }
1917                 break;
1918             }
1919             case SO_TIMESTAMP:
1920             {
1921                 struct timeval *tv = (struct timeval *)data;
1922                 struct target_timeval *target_tv =
1923                     (struct target_timeval *)target_data;
1924 
1925                 if (len != sizeof(struct timeval) ||
1926                     tgt_len != sizeof(struct target_timeval)) {
1927                     goto unimplemented;
1928                 }
1929 
1930                 /* copy struct timeval to target */
1931                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1932                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1933                 break;
1934             }
1935             case SCM_CREDENTIALS:
1936             {
1937                 struct ucred *cred = (struct ucred *)data;
1938                 struct target_ucred *target_cred =
1939                     (struct target_ucred *)target_data;
1940 
1941                 __put_user(cred->pid, &target_cred->pid);
1942                 __put_user(cred->uid, &target_cred->uid);
1943                 __put_user(cred->gid, &target_cred->gid);
1944                 break;
1945             }
1946             default:
1947                 goto unimplemented;
1948             }
1949             break;
1950 
1951         case SOL_IP:
1952             switch (cmsg->cmsg_type) {
1953             case IP_TTL:
1954             {
1955                 uint32_t *v = (uint32_t *)data;
1956                 uint32_t *t_int = (uint32_t *)target_data;
1957 
1958                 if (len != sizeof(uint32_t) ||
1959                     tgt_len != sizeof(uint32_t)) {
1960                     goto unimplemented;
1961                 }
1962                 __put_user(*v, t_int);
1963                 break;
1964             }
1965             case IP_RECVERR:
1966             {
1967                 struct errhdr_t {
1968                    struct sock_extended_err ee;
1969                    struct sockaddr_in offender;
1970                 };
1971                 struct errhdr_t *errh = (struct errhdr_t *)data;
1972                 struct errhdr_t *target_errh =
1973                     (struct errhdr_t *)target_data;
1974 
1975                 if (len != sizeof(struct errhdr_t) ||
1976                     tgt_len != sizeof(struct errhdr_t)) {
1977                     goto unimplemented;
1978                 }
1979                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1980                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1981                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1982                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1983                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1984                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1985                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1986                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1987                     (void *) &errh->offender, sizeof(errh->offender));
1988                 break;
1989             }
1990             default:
1991                 goto unimplemented;
1992             }
1993             break;
1994 
1995         case SOL_IPV6:
1996             switch (cmsg->cmsg_type) {
1997             case IPV6_HOPLIMIT:
1998             {
1999                 uint32_t *v = (uint32_t *)data;
2000                 uint32_t *t_int = (uint32_t *)target_data;
2001 
2002                 if (len != sizeof(uint32_t) ||
2003                     tgt_len != sizeof(uint32_t)) {
2004                     goto unimplemented;
2005                 }
2006                 __put_user(*v, t_int);
2007                 break;
2008             }
2009             case IPV6_RECVERR:
2010             {
2011                 struct errhdr6_t {
2012                    struct sock_extended_err ee;
2013                    struct sockaddr_in6 offender;
2014                 };
2015                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2016                 struct errhdr6_t *target_errh =
2017                     (struct errhdr6_t *)target_data;
2018 
2019                 if (len != sizeof(struct errhdr6_t) ||
2020                     tgt_len != sizeof(struct errhdr6_t)) {
2021                     goto unimplemented;
2022                 }
2023                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2024                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2025                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2026                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2027                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2028                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2029                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2030                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2031                     (void *) &errh->offender, sizeof(errh->offender));
2032                 break;
2033             }
2034             default:
2035                 goto unimplemented;
2036             }
2037             break;
2038 
2039         default:
2040         unimplemented:
2041             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2042                           cmsg->cmsg_level, cmsg->cmsg_type);
2043             memcpy(target_data, data, MIN(len, tgt_len));
2044             if (tgt_len > len) {
2045                 memset(target_data + len, 0, tgt_len - len);
2046             }
2047         }
2048 
2049         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2050         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2051         if (msg_controllen < tgt_space) {
2052             tgt_space = msg_controllen;
2053         }
2054         msg_controllen -= tgt_space;
2055         space += tgt_space;
2056         cmsg = CMSG_NXTHDR(msgh, cmsg);
2057         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2058                                          target_cmsg_start);
2059     }
2060     unlock_user(target_cmsg, target_cmsg_addr, space);
2061  the_end:
2062     target_msgh->msg_controllen = tswapal(space);
2063     return 0;
2064 }
2065 
2066 /* do_setsockopt() Must return target values and target errnos. */
2067 static abi_long do_setsockopt(int sockfd, int level, int optname,
2068                               abi_ulong optval_addr, socklen_t optlen)
2069 {
2070     abi_long ret;
2071     int val;
2072     struct ip_mreqn *ip_mreq;
2073     struct ip_mreq_source *ip_mreq_source;
2074 
2075     switch(level) {
2076     case SOL_TCP:
2077     case SOL_UDP:
2078         /* TCP and UDP options all take an 'int' value.  */
2079         if (optlen < sizeof(uint32_t))
2080             return -TARGET_EINVAL;
2081 
2082         if (get_user_u32(val, optval_addr))
2083             return -TARGET_EFAULT;
2084         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2085         break;
2086     case SOL_IP:
2087         switch(optname) {
2088         case IP_TOS:
2089         case IP_TTL:
2090         case IP_HDRINCL:
2091         case IP_ROUTER_ALERT:
2092         case IP_RECVOPTS:
2093         case IP_RETOPTS:
2094         case IP_PKTINFO:
2095         case IP_MTU_DISCOVER:
2096         case IP_RECVERR:
2097         case IP_RECVTTL:
2098         case IP_RECVTOS:
2099 #ifdef IP_FREEBIND
2100         case IP_FREEBIND:
2101 #endif
2102         case IP_MULTICAST_TTL:
2103         case IP_MULTICAST_LOOP:
2104             val = 0;
2105             if (optlen >= sizeof(uint32_t)) {
2106                 if (get_user_u32(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             } else if (optlen >= 1) {
2109                 if (get_user_u8(val, optval_addr))
2110                     return -TARGET_EFAULT;
2111             }
2112             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113             break;
2114         case IP_ADD_MEMBERSHIP:
2115         case IP_DROP_MEMBERSHIP:
2116             if (optlen < sizeof (struct target_ip_mreq) ||
2117                 optlen > sizeof (struct target_ip_mreqn))
2118                 return -TARGET_EINVAL;
2119 
2120             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2121             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2122             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2123             break;
2124 
2125         case IP_BLOCK_SOURCE:
2126         case IP_UNBLOCK_SOURCE:
2127         case IP_ADD_SOURCE_MEMBERSHIP:
2128         case IP_DROP_SOURCE_MEMBERSHIP:
2129             if (optlen != sizeof (struct target_ip_mreq_source))
2130                 return -TARGET_EINVAL;
2131 
2132             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2133             if (!ip_mreq_source) {
2134                 return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2137             unlock_user (ip_mreq_source, optval_addr, 0);
2138             break;
2139 
2140         default:
2141             goto unimplemented;
2142         }
2143         break;
2144     case SOL_IPV6:
2145         switch (optname) {
2146         case IPV6_MTU_DISCOVER:
2147         case IPV6_MTU:
2148         case IPV6_V6ONLY:
2149         case IPV6_RECVPKTINFO:
2150         case IPV6_UNICAST_HOPS:
2151         case IPV6_MULTICAST_HOPS:
2152         case IPV6_MULTICAST_LOOP:
2153         case IPV6_RECVERR:
2154         case IPV6_RECVHOPLIMIT:
2155         case IPV6_2292HOPLIMIT:
2156         case IPV6_CHECKSUM:
2157         case IPV6_ADDRFORM:
2158         case IPV6_2292PKTINFO:
2159         case IPV6_RECVTCLASS:
2160         case IPV6_RECVRTHDR:
2161         case IPV6_2292RTHDR:
2162         case IPV6_RECVHOPOPTS:
2163         case IPV6_2292HOPOPTS:
2164         case IPV6_RECVDSTOPTS:
2165         case IPV6_2292DSTOPTS:
2166         case IPV6_TCLASS:
2167         case IPV6_ADDR_PREFERENCES:
2168 #ifdef IPV6_RECVPATHMTU
2169         case IPV6_RECVPATHMTU:
2170 #endif
2171 #ifdef IPV6_TRANSPARENT
2172         case IPV6_TRANSPARENT:
2173 #endif
2174 #ifdef IPV6_FREEBIND
2175         case IPV6_FREEBIND:
2176 #endif
2177 #ifdef IPV6_RECVORIGDSTADDR
2178         case IPV6_RECVORIGDSTADDR:
2179 #endif
2180             val = 0;
2181             if (optlen < sizeof(uint32_t)) {
2182                 return -TARGET_EINVAL;
2183             }
2184             if (get_user_u32(val, optval_addr)) {
2185                 return -TARGET_EFAULT;
2186             }
2187             ret = get_errno(setsockopt(sockfd, level, optname,
2188                                        &val, sizeof(val)));
2189             break;
2190         case IPV6_PKTINFO:
2191         {
2192             struct in6_pktinfo pki;
2193 
2194             if (optlen < sizeof(pki)) {
2195                 return -TARGET_EINVAL;
2196             }
2197 
2198             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2199                 return -TARGET_EFAULT;
2200             }
2201 
2202             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2203 
2204             ret = get_errno(setsockopt(sockfd, level, optname,
2205                                        &pki, sizeof(pki)));
2206             break;
2207         }
2208         case IPV6_ADD_MEMBERSHIP:
2209         case IPV6_DROP_MEMBERSHIP:
2210         {
2211             struct ipv6_mreq ipv6mreq;
2212 
2213             if (optlen < sizeof(ipv6mreq)) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2218                 return -TARGET_EFAULT;
2219             }
2220 
2221             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2222 
2223             ret = get_errno(setsockopt(sockfd, level, optname,
2224                                        &ipv6mreq, sizeof(ipv6mreq)));
2225             break;
2226         }
2227         default:
2228             goto unimplemented;
2229         }
2230         break;
2231     case SOL_ICMPV6:
2232         switch (optname) {
2233         case ICMPV6_FILTER:
2234         {
2235             struct icmp6_filter icmp6f;
2236 
2237             if (optlen > sizeof(icmp6f)) {
2238                 optlen = sizeof(icmp6f);
2239             }
2240 
2241             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2242                 return -TARGET_EFAULT;
2243             }
2244 
2245             for (val = 0; val < 8; val++) {
2246                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2247             }
2248 
2249             ret = get_errno(setsockopt(sockfd, level, optname,
2250                                        &icmp6f, optlen));
2251             break;
2252         }
2253         default:
2254             goto unimplemented;
2255         }
2256         break;
2257     case SOL_RAW:
2258         switch (optname) {
2259         case ICMP_FILTER:
2260         case IPV6_CHECKSUM:
2261             /* those take an u32 value */
2262             if (optlen < sizeof(uint32_t)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (get_user_u32(val, optval_addr)) {
2267                 return -TARGET_EFAULT;
2268             }
2269             ret = get_errno(setsockopt(sockfd, level, optname,
2270                                        &val, sizeof(val)));
2271             break;
2272 
2273         default:
2274             goto unimplemented;
2275         }
2276         break;
2277 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2278     case SOL_ALG:
2279         switch (optname) {
2280         case ALG_SET_KEY:
2281         {
2282             char *alg_key = g_malloc(optlen);
2283 
2284             if (!alg_key) {
2285                 return -TARGET_ENOMEM;
2286             }
2287             if (copy_from_user(alg_key, optval_addr, optlen)) {
2288                 g_free(alg_key);
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        alg_key, optlen));
2293             g_free(alg_key);
2294             break;
2295         }
2296         case ALG_SET_AEAD_AUTHSIZE:
2297         {
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        NULL, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #endif
2307     case TARGET_SOL_SOCKET:
2308         switch (optname) {
2309         case TARGET_SO_RCVTIMEO:
2310         {
2311                 struct timeval tv;
2312 
2313                 optname = SO_RCVTIMEO;
2314 
2315 set_timeout:
2316                 if (optlen != sizeof(struct target_timeval)) {
2317                     return -TARGET_EINVAL;
2318                 }
2319 
2320                 if (copy_from_user_timeval(&tv, optval_addr)) {
2321                     return -TARGET_EFAULT;
2322                 }
2323 
2324                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2325                                 &tv, sizeof(tv)));
2326                 return ret;
2327         }
2328         case TARGET_SO_SNDTIMEO:
2329                 optname = SO_SNDTIMEO;
2330                 goto set_timeout;
2331         case TARGET_SO_ATTACH_FILTER:
2332         {
2333                 struct target_sock_fprog *tfprog;
2334                 struct target_sock_filter *tfilter;
2335                 struct sock_fprog fprog;
2336                 struct sock_filter *filter;
2337                 int i;
2338 
2339                 if (optlen != sizeof(*tfprog)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345                 if (!lock_user_struct(VERIFY_READ, tfilter,
2346                                       tswapal(tfprog->filter), 0)) {
2347                     unlock_user_struct(tfprog, optval_addr, 1);
2348                     return -TARGET_EFAULT;
2349                 }
2350 
2351                 fprog.len = tswap16(tfprog->len);
2352                 filter = g_try_new(struct sock_filter, fprog.len);
2353                 if (filter == NULL) {
2354                     unlock_user_struct(tfilter, tfprog->filter, 1);
2355                     unlock_user_struct(tfprog, optval_addr, 1);
2356                     return -TARGET_ENOMEM;
2357                 }
2358                 for (i = 0; i < fprog.len; i++) {
2359                     filter[i].code = tswap16(tfilter[i].code);
2360                     filter[i].jt = tfilter[i].jt;
2361                     filter[i].jf = tfilter[i].jf;
2362                     filter[i].k = tswap32(tfilter[i].k);
2363                 }
2364                 fprog.filter = filter;
2365 
2366                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2367                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2368                 g_free(filter);
2369 
2370                 unlock_user_struct(tfilter, tfprog->filter, 1);
2371                 unlock_user_struct(tfprog, optval_addr, 1);
2372                 return ret;
2373         }
2374 	case TARGET_SO_BINDTODEVICE:
2375 	{
2376 		char *dev_ifname, *addr_ifname;
2377 
2378 		if (optlen > IFNAMSIZ - 1) {
2379 		    optlen = IFNAMSIZ - 1;
2380 		}
2381 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2382 		if (!dev_ifname) {
2383 		    return -TARGET_EFAULT;
2384 		}
2385 		optname = SO_BINDTODEVICE;
2386 		addr_ifname = alloca(IFNAMSIZ);
2387 		memcpy(addr_ifname, dev_ifname, optlen);
2388 		addr_ifname[optlen] = 0;
2389 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2390                                            addr_ifname, optlen));
2391 		unlock_user (dev_ifname, optval_addr, 0);
2392 		return ret;
2393 	}
2394         case TARGET_SO_LINGER:
2395         {
2396                 struct linger lg;
2397                 struct target_linger *tlg;
2398 
2399                 if (optlen != sizeof(struct target_linger)) {
2400                     return -TARGET_EINVAL;
2401                 }
2402                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 __get_user(lg.l_onoff, &tlg->l_onoff);
2406                 __get_user(lg.l_linger, &tlg->l_linger);
2407                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2408                                 &lg, sizeof(lg)));
2409                 unlock_user_struct(tlg, optval_addr, 0);
2410                 return ret;
2411         }
2412             /* Options with 'int' argument.  */
2413         case TARGET_SO_DEBUG:
2414 		optname = SO_DEBUG;
2415 		break;
2416         case TARGET_SO_REUSEADDR:
2417 		optname = SO_REUSEADDR;
2418 		break;
2419 #ifdef SO_REUSEPORT
2420         case TARGET_SO_REUSEPORT:
2421                 optname = SO_REUSEPORT;
2422                 break;
2423 #endif
2424         case TARGET_SO_TYPE:
2425 		optname = SO_TYPE;
2426 		break;
2427         case TARGET_SO_ERROR:
2428 		optname = SO_ERROR;
2429 		break;
2430         case TARGET_SO_DONTROUTE:
2431 		optname = SO_DONTROUTE;
2432 		break;
2433         case TARGET_SO_BROADCAST:
2434 		optname = SO_BROADCAST;
2435 		break;
2436         case TARGET_SO_SNDBUF:
2437 		optname = SO_SNDBUF;
2438 		break;
2439         case TARGET_SO_SNDBUFFORCE:
2440                 optname = SO_SNDBUFFORCE;
2441                 break;
2442         case TARGET_SO_RCVBUF:
2443 		optname = SO_RCVBUF;
2444 		break;
2445         case TARGET_SO_RCVBUFFORCE:
2446                 optname = SO_RCVBUFFORCE;
2447                 break;
2448         case TARGET_SO_KEEPALIVE:
2449 		optname = SO_KEEPALIVE;
2450 		break;
2451         case TARGET_SO_OOBINLINE:
2452 		optname = SO_OOBINLINE;
2453 		break;
2454         case TARGET_SO_NO_CHECK:
2455 		optname = SO_NO_CHECK;
2456 		break;
2457         case TARGET_SO_PRIORITY:
2458 		optname = SO_PRIORITY;
2459 		break;
2460 #ifdef SO_BSDCOMPAT
2461         case TARGET_SO_BSDCOMPAT:
2462 		optname = SO_BSDCOMPAT;
2463 		break;
2464 #endif
2465         case TARGET_SO_PASSCRED:
2466 		optname = SO_PASSCRED;
2467 		break;
2468         case TARGET_SO_PASSSEC:
2469                 optname = SO_PASSSEC;
2470                 break;
2471         case TARGET_SO_TIMESTAMP:
2472 		optname = SO_TIMESTAMP;
2473 		break;
2474         case TARGET_SO_RCVLOWAT:
2475 		optname = SO_RCVLOWAT;
2476 		break;
2477         default:
2478             goto unimplemented;
2479         }
2480 	if (optlen < sizeof(uint32_t))
2481             return -TARGET_EINVAL;
2482 
2483 	if (get_user_u32(val, optval_addr))
2484             return -TARGET_EFAULT;
2485 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2486         break;
2487 #ifdef SOL_NETLINK
2488     case SOL_NETLINK:
2489         switch (optname) {
2490         case NETLINK_PKTINFO:
2491         case NETLINK_ADD_MEMBERSHIP:
2492         case NETLINK_DROP_MEMBERSHIP:
2493         case NETLINK_BROADCAST_ERROR:
2494         case NETLINK_NO_ENOBUFS:
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2496         case NETLINK_LISTEN_ALL_NSID:
2497         case NETLINK_CAP_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2500         case NETLINK_EXT_ACK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2502 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2503         case NETLINK_GET_STRICT_CHK:
2504 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2505             break;
2506         default:
2507             goto unimplemented;
2508         }
2509         val = 0;
2510         if (optlen < sizeof(uint32_t)) {
2511             return -TARGET_EINVAL;
2512         }
2513         if (get_user_u32(val, optval_addr)) {
2514             return -TARGET_EFAULT;
2515         }
2516         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2517                                    sizeof(val)));
2518         break;
2519 #endif /* SOL_NETLINK */
2520     default:
2521     unimplemented:
2522         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2523                       level, optname);
2524         ret = -TARGET_ENOPROTOOPT;
2525     }
2526     return ret;
2527 }
2528 
2529 /* do_getsockopt() Must return target values and target errnos. */
2530 static abi_long do_getsockopt(int sockfd, int level, int optname,
2531                               abi_ulong optval_addr, abi_ulong optlen)
2532 {
2533     abi_long ret;
2534     int len, val;
2535     socklen_t lv;
2536 
2537     switch(level) {
2538     case TARGET_SOL_SOCKET:
2539         level = SOL_SOCKET;
2540         switch (optname) {
2541         /* These don't just return a single integer */
2542         case TARGET_SO_PEERNAME:
2543             goto unimplemented;
2544         case TARGET_SO_RCVTIMEO: {
2545             struct timeval tv;
2546             socklen_t tvlen;
2547 
2548             optname = SO_RCVTIMEO;
2549 
2550 get_timeout:
2551             if (get_user_u32(len, optlen)) {
2552                 return -TARGET_EFAULT;
2553             }
2554             if (len < 0) {
2555                 return -TARGET_EINVAL;
2556             }
2557 
2558             tvlen = sizeof(tv);
2559             ret = get_errno(getsockopt(sockfd, level, optname,
2560                                        &tv, &tvlen));
2561             if (ret < 0) {
2562                 return ret;
2563             }
2564             if (len > sizeof(struct target_timeval)) {
2565                 len = sizeof(struct target_timeval);
2566             }
2567             if (copy_to_user_timeval(optval_addr, &tv)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             if (put_user_u32(len, optlen)) {
2571                 return -TARGET_EFAULT;
2572             }
2573             break;
2574         }
2575         case TARGET_SO_SNDTIMEO:
2576             optname = SO_SNDTIMEO;
2577             goto get_timeout;
2578         case TARGET_SO_PEERCRED: {
2579             struct ucred cr;
2580             socklen_t crlen;
2581             struct target_ucred *tcr;
2582 
2583             if (get_user_u32(len, optlen)) {
2584                 return -TARGET_EFAULT;
2585             }
2586             if (len < 0) {
2587                 return -TARGET_EINVAL;
2588             }
2589 
2590             crlen = sizeof(cr);
2591             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2592                                        &cr, &crlen));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (len > crlen) {
2597                 len = crlen;
2598             }
2599             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             __put_user(cr.pid, &tcr->pid);
2603             __put_user(cr.uid, &tcr->uid);
2604             __put_user(cr.gid, &tcr->gid);
2605             unlock_user_struct(tcr, optval_addr, 1);
2606             if (put_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             break;
2610         }
2611         case TARGET_SO_PEERSEC: {
2612             char *name;
2613 
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len < 0) {
2618                 return -TARGET_EINVAL;
2619             }
2620             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2621             if (!name) {
2622                 return -TARGET_EFAULT;
2623             }
2624             lv = len;
2625             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2626                                        name, &lv));
2627             if (put_user_u32(lv, optlen)) {
2628                 ret = -TARGET_EFAULT;
2629             }
2630             unlock_user(name, optval_addr, lv);
2631             break;
2632         }
2633         case TARGET_SO_LINGER:
2634         {
2635             struct linger lg;
2636             socklen_t lglen;
2637             struct target_linger *tlg;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             lglen = sizeof(lg);
2647             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2648                                        &lg, &lglen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > lglen) {
2653                 len = lglen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(lg.l_onoff, &tlg->l_onoff);
2659             __put_user(lg.l_linger, &tlg->l_linger);
2660             unlock_user_struct(tlg, optval_addr, 1);
2661             if (put_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             break;
2665         }
2666         /* Options with 'int' argument.  */
2667         case TARGET_SO_DEBUG:
2668             optname = SO_DEBUG;
2669             goto int_case;
2670         case TARGET_SO_REUSEADDR:
2671             optname = SO_REUSEADDR;
2672             goto int_case;
2673 #ifdef SO_REUSEPORT
2674         case TARGET_SO_REUSEPORT:
2675             optname = SO_REUSEPORT;
2676             goto int_case;
2677 #endif
2678         case TARGET_SO_TYPE:
2679             optname = SO_TYPE;
2680             goto int_case;
2681         case TARGET_SO_ERROR:
2682             optname = SO_ERROR;
2683             goto int_case;
2684         case TARGET_SO_DONTROUTE:
2685             optname = SO_DONTROUTE;
2686             goto int_case;
2687         case TARGET_SO_BROADCAST:
2688             optname = SO_BROADCAST;
2689             goto int_case;
2690         case TARGET_SO_SNDBUF:
2691             optname = SO_SNDBUF;
2692             goto int_case;
2693         case TARGET_SO_RCVBUF:
2694             optname = SO_RCVBUF;
2695             goto int_case;
2696         case TARGET_SO_KEEPALIVE:
2697             optname = SO_KEEPALIVE;
2698             goto int_case;
2699         case TARGET_SO_OOBINLINE:
2700             optname = SO_OOBINLINE;
2701             goto int_case;
2702         case TARGET_SO_NO_CHECK:
2703             optname = SO_NO_CHECK;
2704             goto int_case;
2705         case TARGET_SO_PRIORITY:
2706             optname = SO_PRIORITY;
2707             goto int_case;
2708 #ifdef SO_BSDCOMPAT
2709         case TARGET_SO_BSDCOMPAT:
2710             optname = SO_BSDCOMPAT;
2711             goto int_case;
2712 #endif
2713         case TARGET_SO_PASSCRED:
2714             optname = SO_PASSCRED;
2715             goto int_case;
2716         case TARGET_SO_TIMESTAMP:
2717             optname = SO_TIMESTAMP;
2718             goto int_case;
2719         case TARGET_SO_RCVLOWAT:
2720             optname = SO_RCVLOWAT;
2721             goto int_case;
2722         case TARGET_SO_ACCEPTCONN:
2723             optname = SO_ACCEPTCONN;
2724             goto int_case;
2725         case TARGET_SO_PROTOCOL:
2726             optname = SO_PROTOCOL;
2727             goto int_case;
2728         case TARGET_SO_DOMAIN:
2729             optname = SO_DOMAIN;
2730             goto int_case;
2731         default:
2732             goto int_case;
2733         }
2734         break;
2735     case SOL_TCP:
2736     case SOL_UDP:
2737         /* TCP and UDP options all take an 'int' value.  */
2738     int_case:
2739         if (get_user_u32(len, optlen))
2740             return -TARGET_EFAULT;
2741         if (len < 0)
2742             return -TARGET_EINVAL;
2743         lv = sizeof(lv);
2744         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2745         if (ret < 0)
2746             return ret;
2747         if (optname == SO_TYPE) {
2748             val = host_to_target_sock_type(val);
2749         }
2750         if (len > lv)
2751             len = lv;
2752         if (len == 4) {
2753             if (put_user_u32(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         } else {
2756             if (put_user_u8(val, optval_addr))
2757                 return -TARGET_EFAULT;
2758         }
2759         if (put_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         break;
2762     case SOL_IP:
2763         switch(optname) {
2764         case IP_TOS:
2765         case IP_TTL:
2766         case IP_HDRINCL:
2767         case IP_ROUTER_ALERT:
2768         case IP_RECVOPTS:
2769         case IP_RETOPTS:
2770         case IP_PKTINFO:
2771         case IP_MTU_DISCOVER:
2772         case IP_RECVERR:
2773         case IP_RECVTOS:
2774 #ifdef IP_FREEBIND
2775         case IP_FREEBIND:
2776 #endif
2777         case IP_MULTICAST_TTL:
2778         case IP_MULTICAST_LOOP:
2779             if (get_user_u32(len, optlen))
2780                 return -TARGET_EFAULT;
2781             if (len < 0)
2782                 return -TARGET_EINVAL;
2783             lv = sizeof(lv);
2784             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785             if (ret < 0)
2786                 return ret;
2787             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2788                 len = 1;
2789                 if (put_user_u32(len, optlen)
2790                     || put_user_u8(val, optval_addr))
2791                     return -TARGET_EFAULT;
2792             } else {
2793                 if (len > sizeof(int))
2794                     len = sizeof(int);
2795                 if (put_user_u32(len, optlen)
2796                     || put_user_u32(val, optval_addr))
2797                     return -TARGET_EFAULT;
2798             }
2799             break;
2800         default:
2801             ret = -TARGET_ENOPROTOOPT;
2802             break;
2803         }
2804         break;
2805     case SOL_IPV6:
2806         switch (optname) {
2807         case IPV6_MTU_DISCOVER:
2808         case IPV6_MTU:
2809         case IPV6_V6ONLY:
2810         case IPV6_RECVPKTINFO:
2811         case IPV6_UNICAST_HOPS:
2812         case IPV6_MULTICAST_HOPS:
2813         case IPV6_MULTICAST_LOOP:
2814         case IPV6_RECVERR:
2815         case IPV6_RECVHOPLIMIT:
2816         case IPV6_2292HOPLIMIT:
2817         case IPV6_CHECKSUM:
2818         case IPV6_ADDRFORM:
2819         case IPV6_2292PKTINFO:
2820         case IPV6_RECVTCLASS:
2821         case IPV6_RECVRTHDR:
2822         case IPV6_2292RTHDR:
2823         case IPV6_RECVHOPOPTS:
2824         case IPV6_2292HOPOPTS:
2825         case IPV6_RECVDSTOPTS:
2826         case IPV6_2292DSTOPTS:
2827         case IPV6_TCLASS:
2828         case IPV6_ADDR_PREFERENCES:
2829 #ifdef IPV6_RECVPATHMTU
2830         case IPV6_RECVPATHMTU:
2831 #endif
2832 #ifdef IPV6_TRANSPARENT
2833         case IPV6_TRANSPARENT:
2834 #endif
2835 #ifdef IPV6_FREEBIND
2836         case IPV6_FREEBIND:
2837 #endif
2838 #ifdef IPV6_RECVORIGDSTADDR
2839         case IPV6_RECVORIGDSTADDR:
2840 #endif
2841             if (get_user_u32(len, optlen))
2842                 return -TARGET_EFAULT;
2843             if (len < 0)
2844                 return -TARGET_EINVAL;
2845             lv = sizeof(lv);
2846             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2847             if (ret < 0)
2848                 return ret;
2849             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2850                 len = 1;
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u8(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             } else {
2855                 if (len > sizeof(int))
2856                     len = sizeof(int);
2857                 if (put_user_u32(len, optlen)
2858                     || put_user_u32(val, optval_addr))
2859                     return -TARGET_EFAULT;
2860             }
2861             break;
2862         default:
2863             ret = -TARGET_ENOPROTOOPT;
2864             break;
2865         }
2866         break;
2867 #ifdef SOL_NETLINK
2868     case SOL_NETLINK:
2869         switch (optname) {
2870         case NETLINK_PKTINFO:
2871         case NETLINK_BROADCAST_ERROR:
2872         case NETLINK_NO_ENOBUFS:
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2874         case NETLINK_LISTEN_ALL_NSID:
2875         case NETLINK_CAP_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2878         case NETLINK_EXT_ACK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2881         case NETLINK_GET_STRICT_CHK:
2882 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2883             if (get_user_u32(len, optlen)) {
2884                 return -TARGET_EFAULT;
2885             }
2886             if (len != sizeof(val)) {
2887                 return -TARGET_EINVAL;
2888             }
2889             lv = len;
2890             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2891             if (ret < 0) {
2892                 return ret;
2893             }
2894             if (put_user_u32(lv, optlen)
2895                 || put_user_u32(val, optval_addr)) {
2896                 return -TARGET_EFAULT;
2897             }
2898             break;
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LIST_MEMBERSHIPS:
2901         {
2902             uint32_t *results;
2903             int i;
2904             if (get_user_u32(len, optlen)) {
2905                 return -TARGET_EFAULT;
2906             }
2907             if (len < 0) {
2908                 return -TARGET_EINVAL;
2909             }
2910             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2911             if (!results && len > 0) {
2912                 return -TARGET_EFAULT;
2913             }
2914             lv = len;
2915             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2916             if (ret < 0) {
2917                 unlock_user(results, optval_addr, 0);
2918                 return ret;
2919             }
2920             /* swap host endianess to target endianess. */
2921             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2922                 results[i] = tswap32(results[i]);
2923             }
2924             if (put_user_u32(lv, optlen)) {
2925                 return -TARGET_EFAULT;
2926             }
2927             unlock_user(results, optval_addr, 0);
2928             break;
2929         }
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931         default:
2932             goto unimplemented;
2933         }
2934         break;
2935 #endif /* SOL_NETLINK */
2936     default:
2937     unimplemented:
2938         qemu_log_mask(LOG_UNIMP,
2939                       "getsockopt level=%d optname=%d not yet supported\n",
2940                       level, optname);
2941         ret = -TARGET_EOPNOTSUPP;
2942         break;
2943     }
2944     return ret;
2945 }
2946 
2947 /* Convert target low/high pair representing file offset into the host
2948  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2949  * as the kernel doesn't handle them either.
2950  */
2951 static void target_to_host_low_high(abi_ulong tlow,
2952                                     abi_ulong thigh,
2953                                     unsigned long *hlow,
2954                                     unsigned long *hhigh)
2955 {
2956     uint64_t off = tlow |
2957         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2958         TARGET_LONG_BITS / 2;
2959 
2960     *hlow = off;
2961     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2962 }
2963 
2964 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2965                                 abi_ulong count, int copy)
2966 {
2967     struct target_iovec *target_vec;
2968     struct iovec *vec;
2969     abi_ulong total_len, max_len;
2970     int i;
2971     int err = 0;
2972     bool bad_address = false;
2973 
2974     if (count == 0) {
2975         errno = 0;
2976         return NULL;
2977     }
2978     if (count > IOV_MAX) {
2979         errno = EINVAL;
2980         return NULL;
2981     }
2982 
2983     vec = g_try_new0(struct iovec, count);
2984     if (vec == NULL) {
2985         errno = ENOMEM;
2986         return NULL;
2987     }
2988 
2989     target_vec = lock_user(VERIFY_READ, target_addr,
2990                            count * sizeof(struct target_iovec), 1);
2991     if (target_vec == NULL) {
2992         err = EFAULT;
2993         goto fail2;
2994     }
2995 
2996     /* ??? If host page size > target page size, this will result in a
2997        value larger than what we can actually support.  */
2998     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2999     total_len = 0;
3000 
3001     for (i = 0; i < count; i++) {
3002         abi_ulong base = tswapal(target_vec[i].iov_base);
3003         abi_long len = tswapal(target_vec[i].iov_len);
3004 
3005         if (len < 0) {
3006             err = EINVAL;
3007             goto fail;
3008         } else if (len == 0) {
3009             /* Zero length pointer is ignored.  */
3010             vec[i].iov_base = 0;
3011         } else {
3012             vec[i].iov_base = lock_user(type, base, len, copy);
3013             /* If the first buffer pointer is bad, this is a fault.  But
3014              * subsequent bad buffers will result in a partial write; this
3015              * is realized by filling the vector with null pointers and
3016              * zero lengths. */
3017             if (!vec[i].iov_base) {
3018                 if (i == 0) {
3019                     err = EFAULT;
3020                     goto fail;
3021                 } else {
3022                     bad_address = true;
3023                 }
3024             }
3025             if (bad_address) {
3026                 len = 0;
3027             }
3028             if (len > max_len - total_len) {
3029                 len = max_len - total_len;
3030             }
3031         }
3032         vec[i].iov_len = len;
3033         total_len += len;
3034     }
3035 
3036     unlock_user(target_vec, target_addr, 0);
3037     return vec;
3038 
3039  fail:
3040     while (--i >= 0) {
3041         if (tswapal(target_vec[i].iov_len) > 0) {
3042             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3043         }
3044     }
3045     unlock_user(target_vec, target_addr, 0);
3046  fail2:
3047     g_free(vec);
3048     errno = err;
3049     return NULL;
3050 }
3051 
3052 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3053                          abi_ulong count, int copy)
3054 {
3055     struct target_iovec *target_vec;
3056     int i;
3057 
3058     target_vec = lock_user(VERIFY_READ, target_addr,
3059                            count * sizeof(struct target_iovec), 1);
3060     if (target_vec) {
3061         for (i = 0; i < count; i++) {
3062             abi_ulong base = tswapal(target_vec[i].iov_base);
3063             abi_long len = tswapal(target_vec[i].iov_len);
3064             if (len < 0) {
3065                 break;
3066             }
3067             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3068         }
3069         unlock_user(target_vec, target_addr, 0);
3070     }
3071 
3072     g_free(vec);
3073 }
3074 
3075 static inline int target_to_host_sock_type(int *type)
3076 {
3077     int host_type = 0;
3078     int target_type = *type;
3079 
3080     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3081     case TARGET_SOCK_DGRAM:
3082         host_type = SOCK_DGRAM;
3083         break;
3084     case TARGET_SOCK_STREAM:
3085         host_type = SOCK_STREAM;
3086         break;
3087     default:
3088         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3089         break;
3090     }
3091     if (target_type & TARGET_SOCK_CLOEXEC) {
3092 #if defined(SOCK_CLOEXEC)
3093         host_type |= SOCK_CLOEXEC;
3094 #else
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     if (target_type & TARGET_SOCK_NONBLOCK) {
3099 #if defined(SOCK_NONBLOCK)
3100         host_type |= SOCK_NONBLOCK;
3101 #elif !defined(O_NONBLOCK)
3102         return -TARGET_EINVAL;
3103 #endif
3104     }
3105     *type = host_type;
3106     return 0;
3107 }
3108 
3109 /* Try to emulate socket type flags after socket creation.  */
3110 static int sock_flags_fixup(int fd, int target_type)
3111 {
3112 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3113     if (target_type & TARGET_SOCK_NONBLOCK) {
3114         int flags = fcntl(fd, F_GETFL);
3115         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3116             close(fd);
3117             return -TARGET_EINVAL;
3118         }
3119     }
3120 #endif
3121     return fd;
3122 }
3123 
3124 /* do_socket() Must return target values and target errnos. */
3125 static abi_long do_socket(int domain, int type, int protocol)
3126 {
3127     int target_type = type;
3128     int ret;
3129 
3130     ret = target_to_host_sock_type(&type);
3131     if (ret) {
3132         return ret;
3133     }
3134 
3135     if (domain == PF_NETLINK && !(
3136 #ifdef CONFIG_RTNETLINK
3137          protocol == NETLINK_ROUTE ||
3138 #endif
3139          protocol == NETLINK_KOBJECT_UEVENT ||
3140          protocol == NETLINK_AUDIT)) {
3141         return -TARGET_EPROTONOSUPPORT;
3142     }
3143 
3144     if (domain == AF_PACKET ||
3145         (domain == AF_INET && type == SOCK_PACKET)) {
3146         protocol = tswap16(protocol);
3147     }
3148 
3149     ret = get_errno(socket(domain, type, protocol));
3150     if (ret >= 0) {
3151         ret = sock_flags_fixup(ret, target_type);
3152         if (type == SOCK_PACKET) {
3153             /* Manage an obsolete case :
3154              * if socket type is SOCK_PACKET, bind by name
3155              */
3156             fd_trans_register(ret, &target_packet_trans);
3157         } else if (domain == PF_NETLINK) {
3158             switch (protocol) {
3159 #ifdef CONFIG_RTNETLINK
3160             case NETLINK_ROUTE:
3161                 fd_trans_register(ret, &target_netlink_route_trans);
3162                 break;
3163 #endif
3164             case NETLINK_KOBJECT_UEVENT:
3165                 /* nothing to do: messages are strings */
3166                 break;
3167             case NETLINK_AUDIT:
3168                 fd_trans_register(ret, &target_netlink_audit_trans);
3169                 break;
3170             default:
3171                 g_assert_not_reached();
3172             }
3173         }
3174     }
3175     return ret;
3176 }
3177 
3178 /* do_bind() Must return target values and target errnos. */
3179 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3180                         socklen_t addrlen)
3181 {
3182     void *addr;
3183     abi_long ret;
3184 
3185     if ((int)addrlen < 0) {
3186         return -TARGET_EINVAL;
3187     }
3188 
3189     addr = alloca(addrlen+1);
3190 
3191     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3192     if (ret)
3193         return ret;
3194 
3195     return get_errno(bind(sockfd, addr, addrlen));
3196 }
3197 
3198 /* do_connect() Must return target values and target errnos. */
3199 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3200                            socklen_t addrlen)
3201 {
3202     void *addr;
3203     abi_long ret;
3204 
3205     if ((int)addrlen < 0) {
3206         return -TARGET_EINVAL;
3207     }
3208 
3209     addr = alloca(addrlen+1);
3210 
3211     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212     if (ret)
3213         return ret;
3214 
3215     return get_errno(safe_connect(sockfd, addr, addrlen));
3216 }
3217 
3218 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3219 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3220                                       int flags, int send)
3221 {
3222     abi_long ret, len;
3223     struct msghdr msg;
3224     abi_ulong count;
3225     struct iovec *vec;
3226     abi_ulong target_vec;
3227 
3228     if (msgp->msg_name) {
3229         msg.msg_namelen = tswap32(msgp->msg_namelen);
3230         msg.msg_name = alloca(msg.msg_namelen+1);
3231         ret = target_to_host_sockaddr(fd, msg.msg_name,
3232                                       tswapal(msgp->msg_name),
3233                                       msg.msg_namelen);
3234         if (ret == -TARGET_EFAULT) {
3235             /* For connected sockets msg_name and msg_namelen must
3236              * be ignored, so returning EFAULT immediately is wrong.
3237              * Instead, pass a bad msg_name to the host kernel, and
3238              * let it decide whether to return EFAULT or not.
3239              */
3240             msg.msg_name = (void *)-1;
3241         } else if (ret) {
3242             goto out2;
3243         }
3244     } else {
3245         msg.msg_name = NULL;
3246         msg.msg_namelen = 0;
3247     }
3248     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3249     msg.msg_control = alloca(msg.msg_controllen);
3250     memset(msg.msg_control, 0, msg.msg_controllen);
3251 
3252     msg.msg_flags = tswap32(msgp->msg_flags);
3253 
3254     count = tswapal(msgp->msg_iovlen);
3255     target_vec = tswapal(msgp->msg_iov);
3256 
3257     if (count > IOV_MAX) {
3258         /* sendrcvmsg returns a different errno for this condition than
3259          * readv/writev, so we must catch it here before lock_iovec() does.
3260          */
3261         ret = -TARGET_EMSGSIZE;
3262         goto out2;
3263     }
3264 
3265     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3266                      target_vec, count, send);
3267     if (vec == NULL) {
3268         ret = -host_to_target_errno(errno);
3269         goto out2;
3270     }
3271     msg.msg_iovlen = count;
3272     msg.msg_iov = vec;
3273 
3274     if (send) {
3275         if (fd_trans_target_to_host_data(fd)) {
3276             void *host_msg;
3277 
3278             host_msg = g_malloc(msg.msg_iov->iov_len);
3279             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3280             ret = fd_trans_target_to_host_data(fd)(host_msg,
3281                                                    msg.msg_iov->iov_len);
3282             if (ret >= 0) {
3283                 msg.msg_iov->iov_base = host_msg;
3284                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3285             }
3286             g_free(host_msg);
3287         } else {
3288             ret = target_to_host_cmsg(&msg, msgp);
3289             if (ret == 0) {
3290                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3291             }
3292         }
3293     } else {
3294         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3295         if (!is_error(ret)) {
3296             len = ret;
3297             if (fd_trans_host_to_target_data(fd)) {
3298                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3299                                                MIN(msg.msg_iov->iov_len, len));
3300             } else {
3301                 ret = host_to_target_cmsg(msgp, &msg);
3302             }
3303             if (!is_error(ret)) {
3304                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3305                 msgp->msg_flags = tswap32(msg.msg_flags);
3306                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3307                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3308                                     msg.msg_name, msg.msg_namelen);
3309                     if (ret) {
3310                         goto out;
3311                     }
3312                 }
3313 
3314                 ret = len;
3315             }
3316         }
3317     }
3318 
3319 out:
3320     unlock_iovec(vec, target_vec, count, !send);
3321 out2:
3322     return ret;
3323 }
3324 
3325 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3326                                int flags, int send)
3327 {
3328     abi_long ret;
3329     struct target_msghdr *msgp;
3330 
3331     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3332                           msgp,
3333                           target_msg,
3334                           send ? 1 : 0)) {
3335         return -TARGET_EFAULT;
3336     }
3337     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3338     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3339     return ret;
3340 }
3341 
3342 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3343  * so it might not have this *mmsg-specific flag either.
3344  */
3345 #ifndef MSG_WAITFORONE
3346 #define MSG_WAITFORONE 0x10000
3347 #endif
3348 
3349 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3350                                 unsigned int vlen, unsigned int flags,
3351                                 int send)
3352 {
3353     struct target_mmsghdr *mmsgp;
3354     abi_long ret = 0;
3355     int i;
3356 
3357     if (vlen > UIO_MAXIOV) {
3358         vlen = UIO_MAXIOV;
3359     }
3360 
3361     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3362     if (!mmsgp) {
3363         return -TARGET_EFAULT;
3364     }
3365 
3366     for (i = 0; i < vlen; i++) {
3367         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3368         if (is_error(ret)) {
3369             break;
3370         }
3371         mmsgp[i].msg_len = tswap32(ret);
3372         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3373         if (flags & MSG_WAITFORONE) {
3374             flags |= MSG_DONTWAIT;
3375         }
3376     }
3377 
3378     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3379 
3380     /* Return number of datagrams sent if we sent any at all;
3381      * otherwise return the error.
3382      */
3383     if (i) {
3384         return i;
3385     }
3386     return ret;
3387 }
3388 
3389 /* do_accept4() Must return target values and target errnos. */
3390 static abi_long do_accept4(int fd, abi_ulong target_addr,
3391                            abi_ulong target_addrlen_addr, int flags)
3392 {
3393     socklen_t addrlen, ret_addrlen;
3394     void *addr;
3395     abi_long ret;
3396     int host_flags;
3397 
3398     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3399 
3400     if (target_addr == 0) {
3401         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3402     }
3403 
3404     /* linux returns EFAULT if addrlen pointer is invalid */
3405     if (get_user_u32(addrlen, target_addrlen_addr))
3406         return -TARGET_EFAULT;
3407 
3408     if ((int)addrlen < 0) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3413         return -TARGET_EFAULT;
3414     }
3415 
3416     addr = alloca(addrlen);
3417 
3418     ret_addrlen = addrlen;
3419     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3420     if (!is_error(ret)) {
3421         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3422         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3423             ret = -TARGET_EFAULT;
3424         }
3425     }
3426     return ret;
3427 }
3428 
3429 /* do_getpeername() Must return target values and target errnos. */
3430 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3431                                abi_ulong target_addrlen_addr)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436 
3437     if (get_user_u32(addrlen, target_addrlen_addr))
3438         return -TARGET_EFAULT;
3439 
3440     if ((int)addrlen < 0) {
3441         return -TARGET_EINVAL;
3442     }
3443 
3444     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3445         return -TARGET_EFAULT;
3446     }
3447 
3448     addr = alloca(addrlen);
3449 
3450     ret_addrlen = addrlen;
3451     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3452     if (!is_error(ret)) {
3453         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3454         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3455             ret = -TARGET_EFAULT;
3456         }
3457     }
3458     return ret;
3459 }
3460 
3461 /* do_getsockname() Must return target values and target errnos. */
3462 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3463                                abi_ulong target_addrlen_addr)
3464 {
3465     socklen_t addrlen, ret_addrlen;
3466     void *addr;
3467     abi_long ret;
3468 
3469     if (get_user_u32(addrlen, target_addrlen_addr))
3470         return -TARGET_EFAULT;
3471 
3472     if ((int)addrlen < 0) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3477         return -TARGET_EFAULT;
3478     }
3479 
3480     addr = alloca(addrlen);
3481 
3482     ret_addrlen = addrlen;
3483     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3484     if (!is_error(ret)) {
3485         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3486         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3487             ret = -TARGET_EFAULT;
3488         }
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_socketpair() Must return target values and target errnos. */
3494 static abi_long do_socketpair(int domain, int type, int protocol,
3495                               abi_ulong target_tab_addr)
3496 {
3497     int tab[2];
3498     abi_long ret;
3499 
3500     target_to_host_sock_type(&type);
3501 
3502     ret = get_errno(socketpair(domain, type, protocol, tab));
3503     if (!is_error(ret)) {
3504         if (put_user_s32(tab[0], target_tab_addr)
3505             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3506             ret = -TARGET_EFAULT;
3507     }
3508     return ret;
3509 }
3510 
3511 /* do_sendto() Must return target values and target errnos. */
3512 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3513                           abi_ulong target_addr, socklen_t addrlen)
3514 {
3515     void *addr;
3516     void *host_msg;
3517     void *copy_msg = NULL;
3518     abi_long ret;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3525     if (!host_msg)
3526         return -TARGET_EFAULT;
3527     if (fd_trans_target_to_host_data(fd)) {
3528         copy_msg = host_msg;
3529         host_msg = g_malloc(len);
3530         memcpy(host_msg, copy_msg, len);
3531         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3532         if (ret < 0) {
3533             goto fail;
3534         }
3535     }
3536     if (target_addr) {
3537         addr = alloca(addrlen+1);
3538         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3539         if (ret) {
3540             goto fail;
3541         }
3542         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3543     } else {
3544         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3545     }
3546 fail:
3547     if (copy_msg) {
3548         g_free(host_msg);
3549         host_msg = copy_msg;
3550     }
3551     unlock_user(host_msg, msg, 0);
3552     return ret;
3553 }
3554 
3555 /* do_recvfrom() Must return target values and target errnos. */
3556 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3557                             abi_ulong target_addr,
3558                             abi_ulong target_addrlen)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     void *host_msg;
3563     abi_long ret;
3564 
3565     if (!msg) {
3566         host_msg = NULL;
3567     } else {
3568         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3569         if (!host_msg) {
3570             return -TARGET_EFAULT;
3571         }
3572     }
3573     if (target_addr) {
3574         if (get_user_u32(addrlen, target_addrlen)) {
3575             ret = -TARGET_EFAULT;
3576             goto fail;
3577         }
3578         if ((int)addrlen < 0) {
3579             ret = -TARGET_EINVAL;
3580             goto fail;
3581         }
3582         addr = alloca(addrlen);
3583         ret_addrlen = addrlen;
3584         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3585                                       addr, &ret_addrlen));
3586     } else {
3587         addr = NULL; /* To keep compiler quiet.  */
3588         addrlen = 0; /* To keep compiler quiet.  */
3589         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3590     }
3591     if (!is_error(ret)) {
3592         if (fd_trans_host_to_target_data(fd)) {
3593             abi_long trans;
3594             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3595             if (is_error(trans)) {
3596                 ret = trans;
3597                 goto fail;
3598             }
3599         }
3600         if (target_addr) {
3601             host_to_target_sockaddr(target_addr, addr,
3602                                     MIN(addrlen, ret_addrlen));
3603             if (put_user_u32(ret_addrlen, target_addrlen)) {
3604                 ret = -TARGET_EFAULT;
3605                 goto fail;
3606             }
3607         }
3608         unlock_user(host_msg, msg, len);
3609     } else {
3610 fail:
3611         unlock_user(host_msg, msg, 0);
3612     }
3613     return ret;
3614 }
3615 
3616 #ifdef TARGET_NR_socketcall
3617 /* do_socketcall() must return target values and target errnos. */
3618 static abi_long do_socketcall(int num, abi_ulong vptr)
3619 {
3620     static const unsigned nargs[] = { /* number of arguments per operation */
3621         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3622         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3623         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3624         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3625         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3626         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3628         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3629         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3631         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3633         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3634         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3636         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3638         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3639         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3641     };
3642     abi_long a[6]; /* max 6 args */
3643     unsigned i;
3644 
3645     /* check the range of the first argument num */
3646     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3647     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3648         return -TARGET_EINVAL;
3649     }
3650     /* ensure we have space for args */
3651     if (nargs[num] > ARRAY_SIZE(a)) {
3652         return -TARGET_EINVAL;
3653     }
3654     /* collect the arguments in a[] according to nargs[] */
3655     for (i = 0; i < nargs[num]; ++i) {
3656         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3657             return -TARGET_EFAULT;
3658         }
3659     }
3660     /* now when we have the args, invoke the appropriate underlying function */
3661     switch (num) {
3662     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3663         return do_socket(a[0], a[1], a[2]);
3664     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3665         return do_bind(a[0], a[1], a[2]);
3666     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3667         return do_connect(a[0], a[1], a[2]);
3668     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3669         return get_errno(listen(a[0], a[1]));
3670     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3671         return do_accept4(a[0], a[1], a[2], 0);
3672     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3673         return do_getsockname(a[0], a[1], a[2]);
3674     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3675         return do_getpeername(a[0], a[1], a[2]);
3676     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3677         return do_socketpair(a[0], a[1], a[2], a[3]);
3678     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3679         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3680     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3681         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3682     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3683         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3684     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3685         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3686     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3687         return get_errno(shutdown(a[0], a[1]));
3688     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3689         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3690     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3691         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3692     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3693         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3694     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3695         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3696     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3697         return do_accept4(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3699         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3700     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3701         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3702     default:
3703         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3704         return -TARGET_EINVAL;
3705     }
3706 }
3707 #endif
3708 
3709 #define N_SHM_REGIONS	32
3710 
3711 static struct shm_region {
3712     abi_ulong start;
3713     abi_ulong size;
3714     bool in_use;
3715 } shm_regions[N_SHM_REGIONS];
3716 
3717 #ifndef TARGET_SEMID64_DS
3718 /* asm-generic version of this struct */
3719 struct target_semid64_ds
3720 {
3721   struct target_ipc_perm sem_perm;
3722   abi_ulong sem_otime;
3723 #if TARGET_ABI_BITS == 32
3724   abi_ulong __unused1;
3725 #endif
3726   abi_ulong sem_ctime;
3727 #if TARGET_ABI_BITS == 32
3728   abi_ulong __unused2;
3729 #endif
3730   abi_ulong sem_nsems;
3731   abi_ulong __unused3;
3732   abi_ulong __unused4;
3733 };
3734 #endif
3735 
3736 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3737                                                abi_ulong target_addr)
3738 {
3739     struct target_ipc_perm *target_ip;
3740     struct target_semid64_ds *target_sd;
3741 
3742     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3743         return -TARGET_EFAULT;
3744     target_ip = &(target_sd->sem_perm);
3745     host_ip->__key = tswap32(target_ip->__key);
3746     host_ip->uid = tswap32(target_ip->uid);
3747     host_ip->gid = tswap32(target_ip->gid);
3748     host_ip->cuid = tswap32(target_ip->cuid);
3749     host_ip->cgid = tswap32(target_ip->cgid);
3750 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3751     host_ip->mode = tswap32(target_ip->mode);
3752 #else
3753     host_ip->mode = tswap16(target_ip->mode);
3754 #endif
3755 #if defined(TARGET_PPC)
3756     host_ip->__seq = tswap32(target_ip->__seq);
3757 #else
3758     host_ip->__seq = tswap16(target_ip->__seq);
3759 #endif
3760     unlock_user_struct(target_sd, target_addr, 0);
3761     return 0;
3762 }
3763 
3764 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3765                                                struct ipc_perm *host_ip)
3766 {
3767     struct target_ipc_perm *target_ip;
3768     struct target_semid64_ds *target_sd;
3769 
3770     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3771         return -TARGET_EFAULT;
3772     target_ip = &(target_sd->sem_perm);
3773     target_ip->__key = tswap32(host_ip->__key);
3774     target_ip->uid = tswap32(host_ip->uid);
3775     target_ip->gid = tswap32(host_ip->gid);
3776     target_ip->cuid = tswap32(host_ip->cuid);
3777     target_ip->cgid = tswap32(host_ip->cgid);
3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3779     target_ip->mode = tswap32(host_ip->mode);
3780 #else
3781     target_ip->mode = tswap16(host_ip->mode);
3782 #endif
3783 #if defined(TARGET_PPC)
3784     target_ip->__seq = tswap32(host_ip->__seq);
3785 #else
3786     target_ip->__seq = tswap16(host_ip->__seq);
3787 #endif
3788     unlock_user_struct(target_sd, target_addr, 1);
3789     return 0;
3790 }
3791 
3792 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_semid64_ds *target_sd;
3796 
3797     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798         return -TARGET_EFAULT;
3799     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3800         return -TARGET_EFAULT;
3801     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3802     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3803     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3804     unlock_user_struct(target_sd, target_addr, 0);
3805     return 0;
3806 }
3807 
3808 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3809                                                struct semid_ds *host_sd)
3810 {
3811     struct target_semid64_ds *target_sd;
3812 
3813     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3814         return -TARGET_EFAULT;
3815     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3816         return -TARGET_EFAULT;
3817     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3818     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3819     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3820     unlock_user_struct(target_sd, target_addr, 1);
3821     return 0;
3822 }
3823 
3824 struct target_seminfo {
3825     int semmap;
3826     int semmni;
3827     int semmns;
3828     int semmnu;
3829     int semmsl;
3830     int semopm;
3831     int semume;
3832     int semusz;
3833     int semvmx;
3834     int semaem;
3835 };
3836 
3837 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3838                                               struct seminfo *host_seminfo)
3839 {
3840     struct target_seminfo *target_seminfo;
3841     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3842         return -TARGET_EFAULT;
3843     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3844     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3845     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3846     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3847     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3848     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3849     __put_user(host_seminfo->semume, &target_seminfo->semume);
3850     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3851     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3852     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3853     unlock_user_struct(target_seminfo, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 union semun {
3858 	int val;
3859 	struct semid_ds *buf;
3860 	unsigned short *array;
3861 	struct seminfo *__buf;
3862 };
3863 
3864 union target_semun {
3865 	int val;
3866 	abi_ulong buf;
3867 	abi_ulong array;
3868 	abi_ulong __buf;
3869 };
3870 
3871 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3872                                                abi_ulong target_addr)
3873 {
3874     int nsems;
3875     unsigned short *array;
3876     union semun semun;
3877     struct semid_ds semid_ds;
3878     int i, ret;
3879 
3880     semun.buf = &semid_ds;
3881 
3882     ret = semctl(semid, 0, IPC_STAT, semun);
3883     if (ret == -1)
3884         return get_errno(ret);
3885 
3886     nsems = semid_ds.sem_nsems;
3887 
3888     *host_array = g_try_new(unsigned short, nsems);
3889     if (!*host_array) {
3890         return -TARGET_ENOMEM;
3891     }
3892     array = lock_user(VERIFY_READ, target_addr,
3893                       nsems*sizeof(unsigned short), 1);
3894     if (!array) {
3895         g_free(*host_array);
3896         return -TARGET_EFAULT;
3897     }
3898 
3899     for(i=0; i<nsems; i++) {
3900         __get_user((*host_array)[i], &array[i]);
3901     }
3902     unlock_user(array, target_addr, 0);
3903 
3904     return 0;
3905 }
3906 
3907 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3908                                                unsigned short **host_array)
3909 {
3910     int nsems;
3911     unsigned short *array;
3912     union semun semun;
3913     struct semid_ds semid_ds;
3914     int i, ret;
3915 
3916     semun.buf = &semid_ds;
3917 
3918     ret = semctl(semid, 0, IPC_STAT, semun);
3919     if (ret == -1)
3920         return get_errno(ret);
3921 
3922     nsems = semid_ds.sem_nsems;
3923 
3924     array = lock_user(VERIFY_WRITE, target_addr,
3925                       nsems*sizeof(unsigned short), 0);
3926     if (!array)
3927         return -TARGET_EFAULT;
3928 
3929     for(i=0; i<nsems; i++) {
3930         __put_user((*host_array)[i], &array[i]);
3931     }
3932     g_free(*host_array);
3933     unlock_user(array, target_addr, 1);
3934 
3935     return 0;
3936 }
3937 
3938 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3939                                  abi_ulong target_arg)
3940 {
3941     union target_semun target_su = { .buf = target_arg };
3942     union semun arg;
3943     struct semid_ds dsarg;
3944     unsigned short *array = NULL;
3945     struct seminfo seminfo;
3946     abi_long ret = -TARGET_EINVAL;
3947     abi_long err;
3948     cmd &= 0xff;
3949 
3950     switch( cmd ) {
3951 	case GETVAL:
3952 	case SETVAL:
3953             /* In 64 bit cross-endian situations, we will erroneously pick up
3954              * the wrong half of the union for the "val" element.  To rectify
3955              * this, the entire 8-byte structure is byteswapped, followed by
3956 	     * a swap of the 4 byte val field. In other cases, the data is
3957 	     * already in proper host byte order. */
3958 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3959 		target_su.buf = tswapal(target_su.buf);
3960 		arg.val = tswap32(target_su.val);
3961 	    } else {
3962 		arg.val = target_su.val;
3963 	    }
3964             ret = get_errno(semctl(semid, semnum, cmd, arg));
3965             break;
3966 	case GETALL:
3967 	case SETALL:
3968             err = target_to_host_semarray(semid, &array, target_su.array);
3969             if (err)
3970                 return err;
3971             arg.array = array;
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             err = host_to_target_semarray(semid, target_su.array, &array);
3974             if (err)
3975                 return err;
3976             break;
3977 	case IPC_STAT:
3978 	case IPC_SET:
3979 	case SEM_STAT:
3980             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3981             if (err)
3982                 return err;
3983             arg.buf = &dsarg;
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3986             if (err)
3987                 return err;
3988             break;
3989 	case IPC_INFO:
3990 	case SEM_INFO:
3991             arg.__buf = &seminfo;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_RMID:
3998 	case GETPID:
3999 	case GETNCNT:
4000 	case GETZCNT:
4001             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4002             break;
4003     }
4004 
4005     return ret;
4006 }
4007 
4008 struct target_sembuf {
4009     unsigned short sem_num;
4010     short sem_op;
4011     short sem_flg;
4012 };
4013 
4014 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4015                                              abi_ulong target_addr,
4016                                              unsigned nsops)
4017 {
4018     struct target_sembuf *target_sembuf;
4019     int i;
4020 
4021     target_sembuf = lock_user(VERIFY_READ, target_addr,
4022                               nsops*sizeof(struct target_sembuf), 1);
4023     if (!target_sembuf)
4024         return -TARGET_EFAULT;
4025 
4026     for(i=0; i<nsops; i++) {
4027         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4028         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4029         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4030     }
4031 
4032     unlock_user(target_sembuf, target_addr, 0);
4033 
4034     return 0;
4035 }
4036 
4037 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4038     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4039 
4040 /*
4041  * This macro is required to handle the s390 variants, which passes the
4042  * arguments in a different order than default.
4043  */
4044 #ifdef __s390x__
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), (__timeout), (__sops)
4047 #else
4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4049   (__nsops), 0, (__sops), (__timeout)
4050 #endif
4051 
4052 static inline abi_long do_semtimedop(int semid,
4053                                      abi_long ptr,
4054                                      unsigned nsops,
4055                                      abi_long timeout, bool time64)
4056 {
4057     struct sembuf *sops;
4058     struct timespec ts, *pts = NULL;
4059     abi_long ret;
4060 
4061     if (timeout) {
4062         pts = &ts;
4063         if (time64) {
4064             if (target_to_host_timespec64(pts, timeout)) {
4065                 return -TARGET_EFAULT;
4066             }
4067         } else {
4068             if (target_to_host_timespec(pts, timeout)) {
4069                 return -TARGET_EFAULT;
4070             }
4071         }
4072     }
4073 
4074     if (nsops > TARGET_SEMOPM) {
4075         return -TARGET_E2BIG;
4076     }
4077 
4078     sops = g_new(struct sembuf, nsops);
4079 
4080     if (target_to_host_sembuf(sops, ptr, nsops)) {
4081         g_free(sops);
4082         return -TARGET_EFAULT;
4083     }
4084 
4085     ret = -TARGET_ENOSYS;
4086 #ifdef __NR_semtimedop
4087     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4088 #endif
4089 #ifdef __NR_ipc
4090     if (ret == -TARGET_ENOSYS) {
4091         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4092                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4093     }
4094 #endif
4095     g_free(sops);
4096     return ret;
4097 }
4098 #endif
4099 
4100 struct target_msqid_ds
4101 {
4102     struct target_ipc_perm msg_perm;
4103     abi_ulong msg_stime;
4104 #if TARGET_ABI_BITS == 32
4105     abi_ulong __unused1;
4106 #endif
4107     abi_ulong msg_rtime;
4108 #if TARGET_ABI_BITS == 32
4109     abi_ulong __unused2;
4110 #endif
4111     abi_ulong msg_ctime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused3;
4114 #endif
4115     abi_ulong __msg_cbytes;
4116     abi_ulong msg_qnum;
4117     abi_ulong msg_qbytes;
4118     abi_ulong msg_lspid;
4119     abi_ulong msg_lrpid;
4120     abi_ulong __unused4;
4121     abi_ulong __unused5;
4122 };
4123 
4124 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4125                                                abi_ulong target_addr)
4126 {
4127     struct target_msqid_ds *target_md;
4128 
4129     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4130         return -TARGET_EFAULT;
4131     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4132         return -TARGET_EFAULT;
4133     host_md->msg_stime = tswapal(target_md->msg_stime);
4134     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4135     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4136     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4137     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4138     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4139     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4140     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4141     unlock_user_struct(target_md, target_addr, 0);
4142     return 0;
4143 }
4144 
4145 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4146                                                struct msqid_ds *host_md)
4147 {
4148     struct target_msqid_ds *target_md;
4149 
4150     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4151         return -TARGET_EFAULT;
4152     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4153         return -TARGET_EFAULT;
4154     target_md->msg_stime = tswapal(host_md->msg_stime);
4155     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4156     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4157     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4158     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4159     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4160     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4161     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4162     unlock_user_struct(target_md, target_addr, 1);
4163     return 0;
4164 }
4165 
4166 struct target_msginfo {
4167     int msgpool;
4168     int msgmap;
4169     int msgmax;
4170     int msgmnb;
4171     int msgmni;
4172     int msgssz;
4173     int msgtql;
4174     unsigned short int msgseg;
4175 };
4176 
4177 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4178                                               struct msginfo *host_msginfo)
4179 {
4180     struct target_msginfo *target_msginfo;
4181     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4182         return -TARGET_EFAULT;
4183     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4184     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4185     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4186     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4187     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4188     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4189     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4190     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4191     unlock_user_struct(target_msginfo, target_addr, 1);
4192     return 0;
4193 }
4194 
4195 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4196 {
4197     struct msqid_ds dsarg;
4198     struct msginfo msginfo;
4199     abi_long ret = -TARGET_EINVAL;
4200 
4201     cmd &= 0xff;
4202 
4203     switch (cmd) {
4204     case IPC_STAT:
4205     case IPC_SET:
4206     case MSG_STAT:
4207         if (target_to_host_msqid_ds(&dsarg,ptr))
4208             return -TARGET_EFAULT;
4209         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4210         if (host_to_target_msqid_ds(ptr,&dsarg))
4211             return -TARGET_EFAULT;
4212         break;
4213     case IPC_RMID:
4214         ret = get_errno(msgctl(msgid, cmd, NULL));
4215         break;
4216     case IPC_INFO:
4217     case MSG_INFO:
4218         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4219         if (host_to_target_msginfo(ptr, &msginfo))
4220             return -TARGET_EFAULT;
4221         break;
4222     }
4223 
4224     return ret;
4225 }
4226 
4227 struct target_msgbuf {
4228     abi_long mtype;
4229     char	mtext[1];
4230 };
4231 
4232 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4233                                  ssize_t msgsz, int msgflg)
4234 {
4235     struct target_msgbuf *target_mb;
4236     struct msgbuf *host_mb;
4237     abi_long ret = 0;
4238 
4239     if (msgsz < 0) {
4240         return -TARGET_EINVAL;
4241     }
4242 
4243     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4244         return -TARGET_EFAULT;
4245     host_mb = g_try_malloc(msgsz + sizeof(long));
4246     if (!host_mb) {
4247         unlock_user_struct(target_mb, msgp, 0);
4248         return -TARGET_ENOMEM;
4249     }
4250     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4251     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4252     ret = -TARGET_ENOSYS;
4253 #ifdef __NR_msgsnd
4254     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4255 #endif
4256 #ifdef __NR_ipc
4257     if (ret == -TARGET_ENOSYS) {
4258 #ifdef __s390x__
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb));
4261 #else
4262         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4263                                  host_mb, 0));
4264 #endif
4265     }
4266 #endif
4267     g_free(host_mb);
4268     unlock_user_struct(target_mb, msgp, 0);
4269 
4270     return ret;
4271 }
4272 
4273 #ifdef __NR_ipc
4274 #if defined(__sparc__)
4275 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4277 #elif defined(__s390x__)
4278 /* The s390 sys_ipc variant has only five parameters.  */
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp})
4281 #else
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4283     ((long int[]){(long int)__msgp, __msgtyp}), 0
4284 #endif
4285 #endif
4286 
4287 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4288                                  ssize_t msgsz, abi_long msgtyp,
4289                                  int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     char *target_mtext;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
4302 
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         ret = -TARGET_ENOMEM;
4306         goto end;
4307     }
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgrcv
4310     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4315                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4316     }
4317 #endif
4318 
4319     if (ret > 0) {
4320         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4321         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4322         if (!target_mtext) {
4323             ret = -TARGET_EFAULT;
4324             goto end;
4325         }
4326         memcpy(target_mb->mtext, host_mb->mtext, ret);
4327         unlock_user(target_mtext, target_mtext_addr, ret);
4328     }
4329 
4330     target_mb->mtype = tswapal(host_mb->mtype);
4331 
4332 end:
4333     if (target_mb)
4334         unlock_user_struct(target_mb, msgp, 1);
4335     g_free(host_mb);
4336     return ret;
4337 }
4338 
4339 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4340                                                abi_ulong target_addr)
4341 {
4342     struct target_shmid_ds *target_sd;
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4345         return -TARGET_EFAULT;
4346     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4347         return -TARGET_EFAULT;
4348     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4349     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4350     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4351     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4352     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4353     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4354     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4355     unlock_user_struct(target_sd, target_addr, 0);
4356     return 0;
4357 }
4358 
4359 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4360                                                struct shmid_ds *host_sd)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4365         return -TARGET_EFAULT;
4366     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4367         return -TARGET_EFAULT;
4368     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 1);
4376     return 0;
4377 }
4378 
4379 struct  target_shminfo {
4380     abi_ulong shmmax;
4381     abi_ulong shmmin;
4382     abi_ulong shmmni;
4383     abi_ulong shmseg;
4384     abi_ulong shmall;
4385 };
4386 
4387 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4388                                               struct shminfo *host_shminfo)
4389 {
4390     struct target_shminfo *target_shminfo;
4391     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4392         return -TARGET_EFAULT;
4393     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4394     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4395     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4396     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4397     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4398     unlock_user_struct(target_shminfo, target_addr, 1);
4399     return 0;
4400 }
4401 
4402 struct target_shm_info {
4403     int used_ids;
4404     abi_ulong shm_tot;
4405     abi_ulong shm_rss;
4406     abi_ulong shm_swp;
4407     abi_ulong swap_attempts;
4408     abi_ulong swap_successes;
4409 };
4410 
4411 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4412                                                struct shm_info *host_shm_info)
4413 {
4414     struct target_shm_info *target_shm_info;
4415     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4416         return -TARGET_EFAULT;
4417     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4418     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4419     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4420     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4421     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4422     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4423     unlock_user_struct(target_shm_info, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4428 {
4429     struct shmid_ds dsarg;
4430     struct shminfo shminfo;
4431     struct shm_info shm_info;
4432     abi_long ret = -TARGET_EINVAL;
4433 
4434     cmd &= 0xff;
4435 
4436     switch(cmd) {
4437     case IPC_STAT:
4438     case IPC_SET:
4439     case SHM_STAT:
4440         if (target_to_host_shmid_ds(&dsarg, buf))
4441             return -TARGET_EFAULT;
4442         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4443         if (host_to_target_shmid_ds(buf, &dsarg))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_INFO:
4447         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4448         if (host_to_target_shminfo(buf, &shminfo))
4449             return -TARGET_EFAULT;
4450         break;
4451     case SHM_INFO:
4452         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4453         if (host_to_target_shm_info(buf, &shm_info))
4454             return -TARGET_EFAULT;
4455         break;
4456     case IPC_RMID:
4457     case SHM_LOCK:
4458     case SHM_UNLOCK:
4459         ret = get_errno(shmctl(shmid, cmd, NULL));
4460         break;
4461     }
4462 
4463     return ret;
4464 }
4465 
4466 #ifndef TARGET_FORCE_SHMLBA
4467 /* For most architectures, SHMLBA is the same as the page size;
4468  * some architectures have larger values, in which case they should
4469  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4470  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4471  * and defining its own value for SHMLBA.
4472  *
4473  * The kernel also permits SHMLBA to be set by the architecture to a
4474  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4475  * this means that addresses are rounded to the large size if
4476  * SHM_RND is set but addresses not aligned to that size are not rejected
4477  * as long as they are at least page-aligned. Since the only architecture
4478  * which uses this is ia64 this code doesn't provide for that oddity.
4479  */
4480 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4481 {
4482     return TARGET_PAGE_SIZE;
4483 }
4484 #endif
4485 
4486 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4487                                  int shmid, abi_ulong shmaddr, int shmflg)
4488 {
4489     CPUState *cpu = env_cpu(cpu_env);
4490     abi_long raddr;
4491     void *host_raddr;
4492     struct shmid_ds shm_info;
4493     int i,ret;
4494     abi_ulong shmlba;
4495 
4496     /* shmat pointers are always untagged */
4497 
4498     /* find out the length of the shared memory segment */
4499     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4500     if (is_error(ret)) {
4501         /* can't get length, bail out */
4502         return ret;
4503     }
4504 
4505     shmlba = target_shmlba(cpu_env);
4506 
4507     if (shmaddr & (shmlba - 1)) {
4508         if (shmflg & SHM_RND) {
4509             shmaddr &= ~(shmlba - 1);
4510         } else {
4511             return -TARGET_EINVAL;
4512         }
4513     }
4514     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4515         return -TARGET_EINVAL;
4516     }
4517 
4518     mmap_lock();
4519 
4520     /*
4521      * We're mapping shared memory, so ensure we generate code for parallel
4522      * execution and flush old translations.  This will work up to the level
4523      * supported by the host -- anything that requires EXCP_ATOMIC will not
4524      * be atomic with respect to an external process.
4525      */
4526     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4527         cpu->tcg_cflags |= CF_PARALLEL;
4528         tb_flush(cpu);
4529     }
4530 
4531     if (shmaddr)
4532         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4533     else {
4534         abi_ulong mmap_start;
4535 
4536         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4537         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4538 
4539         if (mmap_start == -1) {
4540             errno = ENOMEM;
4541             host_raddr = (void *)-1;
4542         } else
4543             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4544                                shmflg | SHM_REMAP);
4545     }
4546 
4547     if (host_raddr == (void *)-1) {
4548         mmap_unlock();
4549         return get_errno((long)host_raddr);
4550     }
4551     raddr=h2g((unsigned long)host_raddr);
4552 
4553     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4554                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4555                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4556 
4557     for (i = 0; i < N_SHM_REGIONS; i++) {
4558         if (!shm_regions[i].in_use) {
4559             shm_regions[i].in_use = true;
4560             shm_regions[i].start = raddr;
4561             shm_regions[i].size = shm_info.shm_segsz;
4562             break;
4563         }
4564     }
4565 
4566     mmap_unlock();
4567     return raddr;
4568 
4569 }
4570 
4571 static inline abi_long do_shmdt(abi_ulong shmaddr)
4572 {
4573     int i;
4574     abi_long rv;
4575 
4576     /* shmdt pointers are always untagged */
4577 
4578     mmap_lock();
4579 
4580     for (i = 0; i < N_SHM_REGIONS; ++i) {
4581         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4582             shm_regions[i].in_use = false;
4583             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4584             break;
4585         }
4586     }
4587     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4588 
4589     mmap_unlock();
4590 
4591     return rv;
4592 }
4593 
4594 #ifdef TARGET_NR_ipc
4595 /* ??? This only works with linear mappings.  */
4596 /* do_ipc() must return target values and target errnos. */
4597 static abi_long do_ipc(CPUArchState *cpu_env,
4598                        unsigned int call, abi_long first,
4599                        abi_long second, abi_long third,
4600                        abi_long ptr, abi_long fifth)
4601 {
4602     int version;
4603     abi_long ret = 0;
4604 
4605     version = call >> 16;
4606     call &= 0xffff;
4607 
4608     switch (call) {
4609     case IPCOP_semop:
4610         ret = do_semtimedop(first, ptr, second, 0, false);
4611         break;
4612     case IPCOP_semtimedop:
4613     /*
4614      * The s390 sys_ipc variant has only five parameters instead of six
4615      * (as for default variant) and the only difference is the handling of
4616      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4617      * to a struct timespec where the generic variant uses fifth parameter.
4618      */
4619 #if defined(TARGET_S390X)
4620         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4621 #else
4622         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4623 #endif
4624         break;
4625 
4626     case IPCOP_semget:
4627         ret = get_errno(semget(first, second, third));
4628         break;
4629 
4630     case IPCOP_semctl: {
4631         /* The semun argument to semctl is passed by value, so dereference the
4632          * ptr argument. */
4633         abi_ulong atptr;
4634         get_user_ual(atptr, ptr);
4635         ret = do_semctl(first, second, third, atptr);
4636         break;
4637     }
4638 
4639     case IPCOP_msgget:
4640         ret = get_errno(msgget(first, second));
4641         break;
4642 
4643     case IPCOP_msgsnd:
4644         ret = do_msgsnd(first, ptr, second, third);
4645         break;
4646 
4647     case IPCOP_msgctl:
4648         ret = do_msgctl(first, second, ptr);
4649         break;
4650 
4651     case IPCOP_msgrcv:
4652         switch (version) {
4653         case 0:
4654             {
4655                 struct target_ipc_kludge {
4656                     abi_long msgp;
4657                     abi_long msgtyp;
4658                 } *tmp;
4659 
4660                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4661                     ret = -TARGET_EFAULT;
4662                     break;
4663                 }
4664 
4665                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4666 
4667                 unlock_user_struct(tmp, ptr, 0);
4668                 break;
4669             }
4670         default:
4671             ret = do_msgrcv(first, ptr, second, fifth, third);
4672         }
4673         break;
4674 
4675     case IPCOP_shmat:
4676         switch (version) {
4677         default:
4678         {
4679             abi_ulong raddr;
4680             raddr = do_shmat(cpu_env, first, ptr, second);
4681             if (is_error(raddr))
4682                 return get_errno(raddr);
4683             if (put_user_ual(raddr, third))
4684                 return -TARGET_EFAULT;
4685             break;
4686         }
4687         case 1:
4688             ret = -TARGET_EINVAL;
4689             break;
4690         }
4691 	break;
4692     case IPCOP_shmdt:
4693         ret = do_shmdt(ptr);
4694 	break;
4695 
4696     case IPCOP_shmget:
4697 	/* IPC_* flag values are the same on all linux platforms */
4698 	ret = get_errno(shmget(first, second, third));
4699 	break;
4700 
4701 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4702     case IPCOP_shmctl:
4703         ret = do_shmctl(first, second, ptr);
4704         break;
4705     default:
4706         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4707                       call, version);
4708 	ret = -TARGET_ENOSYS;
4709 	break;
4710     }
4711     return ret;
4712 }
4713 #endif
4714 
4715 /* kernel structure types definitions */
4716 
4717 #define STRUCT(name, ...) STRUCT_ ## name,
4718 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 enum {
4720 #include "syscall_types.h"
4721 STRUCT_MAX
4722 };
4723 #undef STRUCT
4724 #undef STRUCT_SPECIAL
4725 
4726 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4727 #define STRUCT_SPECIAL(name)
4728 #include "syscall_types.h"
4729 #undef STRUCT
4730 #undef STRUCT_SPECIAL
4731 
4732 #define MAX_STRUCT_SIZE 4096
4733 
4734 #ifdef CONFIG_FIEMAP
4735 /* So fiemap access checks don't overflow on 32 bit systems.
4736  * This is very slightly smaller than the limit imposed by
4737  * the underlying kernel.
4738  */
4739 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4740                             / sizeof(struct fiemap_extent))
4741 
4742 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4743                                        int fd, int cmd, abi_long arg)
4744 {
4745     /* The parameter for this ioctl is a struct fiemap followed
4746      * by an array of struct fiemap_extent whose size is set
4747      * in fiemap->fm_extent_count. The array is filled in by the
4748      * ioctl.
4749      */
4750     int target_size_in, target_size_out;
4751     struct fiemap *fm;
4752     const argtype *arg_type = ie->arg_type;
4753     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4754     void *argptr, *p;
4755     abi_long ret;
4756     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4757     uint32_t outbufsz;
4758     int free_fm = 0;
4759 
4760     assert(arg_type[0] == TYPE_PTR);
4761     assert(ie->access == IOC_RW);
4762     arg_type++;
4763     target_size_in = thunk_type_size(arg_type, 0);
4764     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4765     if (!argptr) {
4766         return -TARGET_EFAULT;
4767     }
4768     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4769     unlock_user(argptr, arg, 0);
4770     fm = (struct fiemap *)buf_temp;
4771     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4772         return -TARGET_EINVAL;
4773     }
4774 
4775     outbufsz = sizeof (*fm) +
4776         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4777 
4778     if (outbufsz > MAX_STRUCT_SIZE) {
4779         /* We can't fit all the extents into the fixed size buffer.
4780          * Allocate one that is large enough and use it instead.
4781          */
4782         fm = g_try_malloc(outbufsz);
4783         if (!fm) {
4784             return -TARGET_ENOMEM;
4785         }
4786         memcpy(fm, buf_temp, sizeof(struct fiemap));
4787         free_fm = 1;
4788     }
4789     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4790     if (!is_error(ret)) {
4791         target_size_out = target_size_in;
4792         /* An extent_count of 0 means we were only counting the extents
4793          * so there are no structs to copy
4794          */
4795         if (fm->fm_extent_count != 0) {
4796             target_size_out += fm->fm_mapped_extents * extent_size;
4797         }
4798         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4799         if (!argptr) {
4800             ret = -TARGET_EFAULT;
4801         } else {
4802             /* Convert the struct fiemap */
4803             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4804             if (fm->fm_extent_count != 0) {
4805                 p = argptr + target_size_in;
4806                 /* ...and then all the struct fiemap_extents */
4807                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4808                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4809                                   THUNK_TARGET);
4810                     p += extent_size;
4811                 }
4812             }
4813             unlock_user(argptr, arg, target_size_out);
4814         }
4815     }
4816     if (free_fm) {
4817         g_free(fm);
4818     }
4819     return ret;
4820 }
4821 #endif
4822 
4823 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4824                                 int fd, int cmd, abi_long arg)
4825 {
4826     const argtype *arg_type = ie->arg_type;
4827     int target_size;
4828     void *argptr;
4829     int ret;
4830     struct ifconf *host_ifconf;
4831     uint32_t outbufsz;
4832     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4833     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4834     int target_ifreq_size;
4835     int nb_ifreq;
4836     int free_buf = 0;
4837     int i;
4838     int target_ifc_len;
4839     abi_long target_ifc_buf;
4840     int host_ifc_len;
4841     char *host_ifc_buf;
4842 
4843     assert(arg_type[0] == TYPE_PTR);
4844     assert(ie->access == IOC_RW);
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848 
4849     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4850     if (!argptr)
4851         return -TARGET_EFAULT;
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854 
4855     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4856     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4857     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4858 
4859     if (target_ifc_buf != 0) {
4860         target_ifc_len = host_ifconf->ifc_len;
4861         nb_ifreq = target_ifc_len / target_ifreq_size;
4862         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4863 
4864         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4865         if (outbufsz > MAX_STRUCT_SIZE) {
4866             /*
4867              * We can't fit all the extents into the fixed size buffer.
4868              * Allocate one that is large enough and use it instead.
4869              */
4870             host_ifconf = malloc(outbufsz);
4871             if (!host_ifconf) {
4872                 return -TARGET_ENOMEM;
4873             }
4874             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4875             free_buf = 1;
4876         }
4877         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4878 
4879         host_ifconf->ifc_len = host_ifc_len;
4880     } else {
4881       host_ifc_buf = NULL;
4882     }
4883     host_ifconf->ifc_buf = host_ifc_buf;
4884 
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4886     if (!is_error(ret)) {
4887 	/* convert host ifc_len to target ifc_len */
4888 
4889         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4890         target_ifc_len = nb_ifreq * target_ifreq_size;
4891         host_ifconf->ifc_len = target_ifc_len;
4892 
4893 	/* restore target ifc_buf */
4894 
4895         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4896 
4897 	/* copy struct ifconf to target user */
4898 
4899         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4900         if (!argptr)
4901             return -TARGET_EFAULT;
4902         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4903         unlock_user(argptr, arg, target_size);
4904 
4905         if (target_ifc_buf != 0) {
4906             /* copy ifreq[] to target user */
4907             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4908             for (i = 0; i < nb_ifreq ; i++) {
4909                 thunk_convert(argptr + i * target_ifreq_size,
4910                               host_ifc_buf + i * sizeof(struct ifreq),
4911                               ifreq_arg_type, THUNK_TARGET);
4912             }
4913             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4914         }
4915     }
4916 
4917     if (free_buf) {
4918         free(host_ifconf);
4919     }
4920 
4921     return ret;
4922 }
4923 
4924 #if defined(CONFIG_USBFS)
4925 #if HOST_LONG_BITS > 64
4926 #error USBDEVFS thunks do not support >64 bit hosts yet.
4927 #endif
4928 struct live_urb {
4929     uint64_t target_urb_adr;
4930     uint64_t target_buf_adr;
4931     char *target_buf_ptr;
4932     struct usbdevfs_urb host_urb;
4933 };
4934 
4935 static GHashTable *usbdevfs_urb_hashtable(void)
4936 {
4937     static GHashTable *urb_hashtable;
4938 
4939     if (!urb_hashtable) {
4940         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4941     }
4942     return urb_hashtable;
4943 }
4944 
4945 static void urb_hashtable_insert(struct live_urb *urb)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     g_hash_table_insert(urb_hashtable, urb, urb);
4949 }
4950 
4951 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4952 {
4953     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4954     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4955 }
4956 
4957 static void urb_hashtable_remove(struct live_urb *urb)
4958 {
4959     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4960     g_hash_table_remove(urb_hashtable, urb);
4961 }
4962 
4963 static abi_long
4964 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                           int fd, int cmd, abi_long arg)
4966 {
4967     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4968     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4969     struct live_urb *lurb;
4970     void *argptr;
4971     uint64_t hurb;
4972     int target_size;
4973     uintptr_t target_urb_adr;
4974     abi_long ret;
4975 
4976     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4977 
4978     memset(buf_temp, 0, sizeof(uint64_t));
4979     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4980     if (is_error(ret)) {
4981         return ret;
4982     }
4983 
4984     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4985     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4986     if (!lurb->target_urb_adr) {
4987         return -TARGET_EFAULT;
4988     }
4989     urb_hashtable_remove(lurb);
4990     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4991         lurb->host_urb.buffer_length);
4992     lurb->target_buf_ptr = NULL;
4993 
4994     /* restore the guest buffer pointer */
4995     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4996 
4997     /* update the guest urb struct */
4998     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5004     unlock_user(argptr, lurb->target_urb_adr, target_size);
5005 
5006     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5007     /* write back the urb handle */
5008     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5009     if (!argptr) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5015     target_urb_adr = lurb->target_urb_adr;
5016     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5017     unlock_user(argptr, arg, target_size);
5018 
5019     g_free(lurb);
5020     return ret;
5021 }
5022 
5023 static abi_long
5024 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5025                              uint8_t *buf_temp __attribute__((unused)),
5026                              int fd, int cmd, abi_long arg)
5027 {
5028     struct live_urb *lurb;
5029 
5030     /* map target address back to host URB with metadata. */
5031     lurb = urb_hashtable_lookup(arg);
5032     if (!lurb) {
5033         return -TARGET_EFAULT;
5034     }
5035     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5036 }
5037 
5038 static abi_long
5039 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5040                             int fd, int cmd, abi_long arg)
5041 {
5042     const argtype *arg_type = ie->arg_type;
5043     int target_size;
5044     abi_long ret;
5045     void *argptr;
5046     int rw_dir;
5047     struct live_urb *lurb;
5048 
5049     /*
5050      * each submitted URB needs to map to a unique ID for the
5051      * kernel, and that unique ID needs to be a pointer to
5052      * host memory.  hence, we need to malloc for each URB.
5053      * isochronous transfers have a variable length struct.
5054      */
5055     arg_type++;
5056     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5057 
5058     /* construct host copy of urb and metadata */
5059     lurb = g_try_malloc0(sizeof(struct live_urb));
5060     if (!lurb) {
5061         return -TARGET_ENOMEM;
5062     }
5063 
5064     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5065     if (!argptr) {
5066         g_free(lurb);
5067         return -TARGET_EFAULT;
5068     }
5069     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5070     unlock_user(argptr, arg, 0);
5071 
5072     lurb->target_urb_adr = arg;
5073     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5074 
5075     /* buffer space used depends on endpoint type so lock the entire buffer */
5076     /* control type urbs should check the buffer contents for true direction */
5077     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5078     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5079         lurb->host_urb.buffer_length, 1);
5080     if (lurb->target_buf_ptr == NULL) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* update buffer pointer in host copy */
5086     lurb->host_urb.buffer = lurb->target_buf_ptr;
5087 
5088     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5089     if (is_error(ret)) {
5090         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5091         g_free(lurb);
5092     } else {
5093         urb_hashtable_insert(lurb);
5094     }
5095 
5096     return ret;
5097 }
5098 #endif /* CONFIG_USBFS */
5099 
5100 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5101                             int cmd, abi_long arg)
5102 {
5103     void *argptr;
5104     struct dm_ioctl *host_dm;
5105     abi_long guest_data;
5106     uint32_t guest_data_size;
5107     int target_size;
5108     const argtype *arg_type = ie->arg_type;
5109     abi_long ret;
5110     void *big_buf = NULL;
5111     char *host_data;
5112 
5113     arg_type++;
5114     target_size = thunk_type_size(arg_type, 0);
5115     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5116     if (!argptr) {
5117         ret = -TARGET_EFAULT;
5118         goto out;
5119     }
5120     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5121     unlock_user(argptr, arg, 0);
5122 
5123     /* buf_temp is too small, so fetch things into a bigger buffer */
5124     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5125     memcpy(big_buf, buf_temp, target_size);
5126     buf_temp = big_buf;
5127     host_dm = big_buf;
5128 
5129     guest_data = arg + host_dm->data_start;
5130     if ((guest_data - arg) < 0) {
5131         ret = -TARGET_EINVAL;
5132         goto out;
5133     }
5134     guest_data_size = host_dm->data_size - host_dm->data_start;
5135     host_data = (char*)host_dm + host_dm->data_start;
5136 
5137     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142 
5143     switch (ie->host_cmd) {
5144     case DM_REMOVE_ALL:
5145     case DM_LIST_DEVICES:
5146     case DM_DEV_CREATE:
5147     case DM_DEV_REMOVE:
5148     case DM_DEV_SUSPEND:
5149     case DM_DEV_STATUS:
5150     case DM_DEV_WAIT:
5151     case DM_TABLE_STATUS:
5152     case DM_TABLE_CLEAR:
5153     case DM_TABLE_DEPS:
5154     case DM_LIST_VERSIONS:
5155         /* no input data */
5156         break;
5157     case DM_DEV_RENAME:
5158     case DM_DEV_SET_GEOMETRY:
5159         /* data contains only strings */
5160         memcpy(host_data, argptr, guest_data_size);
5161         break;
5162     case DM_TARGET_MSG:
5163         memcpy(host_data, argptr, guest_data_size);
5164         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5165         break;
5166     case DM_TABLE_LOAD:
5167     {
5168         void *gspec = argptr;
5169         void *cur_data = host_data;
5170         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5171         int spec_size = thunk_type_size(arg_type, 0);
5172         int i;
5173 
5174         for (i = 0; i < host_dm->target_count; i++) {
5175             struct dm_target_spec *spec = cur_data;
5176             uint32_t next;
5177             int slen;
5178 
5179             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5180             slen = strlen((char*)gspec + spec_size) + 1;
5181             next = spec->next;
5182             spec->next = sizeof(*spec) + slen;
5183             strcpy((char*)&spec[1], gspec + spec_size);
5184             gspec += next;
5185             cur_data += spec->next;
5186         }
5187         break;
5188     }
5189     default:
5190         ret = -TARGET_EINVAL;
5191         unlock_user(argptr, guest_data, 0);
5192         goto out;
5193     }
5194     unlock_user(argptr, guest_data, 0);
5195 
5196     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5197     if (!is_error(ret)) {
5198         guest_data = arg + host_dm->data_start;
5199         guest_data_size = host_dm->data_size - host_dm->data_start;
5200         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5201         switch (ie->host_cmd) {
5202         case DM_REMOVE_ALL:
5203         case DM_DEV_CREATE:
5204         case DM_DEV_REMOVE:
5205         case DM_DEV_RENAME:
5206         case DM_DEV_SUSPEND:
5207         case DM_DEV_STATUS:
5208         case DM_TABLE_LOAD:
5209         case DM_TABLE_CLEAR:
5210         case DM_TARGET_MSG:
5211         case DM_DEV_SET_GEOMETRY:
5212             /* no return data */
5213             break;
5214         case DM_LIST_DEVICES:
5215         {
5216             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5217             uint32_t remaining_data = guest_data_size;
5218             void *cur_data = argptr;
5219             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5220             int nl_size = 12; /* can't use thunk_size due to alignment */
5221 
5222             while (1) {
5223                 uint32_t next = nl->next;
5224                 if (next) {
5225                     nl->next = nl_size + (strlen(nl->name) + 1);
5226                 }
5227                 if (remaining_data < nl->next) {
5228                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5229                     break;
5230                 }
5231                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5232                 strcpy(cur_data + nl_size, nl->name);
5233                 cur_data += nl->next;
5234                 remaining_data -= nl->next;
5235                 if (!next) {
5236                     break;
5237                 }
5238                 nl = (void*)nl + next;
5239             }
5240             break;
5241         }
5242         case DM_DEV_WAIT:
5243         case DM_TABLE_STATUS:
5244         {
5245             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5246             void *cur_data = argptr;
5247             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5248             int spec_size = thunk_type_size(arg_type, 0);
5249             int i;
5250 
5251             for (i = 0; i < host_dm->target_count; i++) {
5252                 uint32_t next = spec->next;
5253                 int slen = strlen((char*)&spec[1]) + 1;
5254                 spec->next = (cur_data - argptr) + spec_size + slen;
5255                 if (guest_data_size < spec->next) {
5256                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257                     break;
5258                 }
5259                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5260                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5261                 cur_data = argptr + spec->next;
5262                 spec = (void*)host_dm + host_dm->data_start + next;
5263             }
5264             break;
5265         }
5266         case DM_TABLE_DEPS:
5267         {
5268             void *hdata = (void*)host_dm + host_dm->data_start;
5269             int count = *(uint32_t*)hdata;
5270             uint64_t *hdev = hdata + 8;
5271             uint64_t *gdev = argptr + 8;
5272             int i;
5273 
5274             *(uint32_t*)argptr = tswap32(count);
5275             for (i = 0; i < count; i++) {
5276                 *gdev = tswap64(*hdev);
5277                 gdev++;
5278                 hdev++;
5279             }
5280             break;
5281         }
5282         case DM_LIST_VERSIONS:
5283         {
5284             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5285             uint32_t remaining_data = guest_data_size;
5286             void *cur_data = argptr;
5287             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5288             int vers_size = thunk_type_size(arg_type, 0);
5289 
5290             while (1) {
5291                 uint32_t next = vers->next;
5292                 if (next) {
5293                     vers->next = vers_size + (strlen(vers->name) + 1);
5294                 }
5295                 if (remaining_data < vers->next) {
5296                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297                     break;
5298                 }
5299                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5300                 strcpy(cur_data + vers_size, vers->name);
5301                 cur_data += vers->next;
5302                 remaining_data -= vers->next;
5303                 if (!next) {
5304                     break;
5305                 }
5306                 vers = (void*)vers + next;
5307             }
5308             break;
5309         }
5310         default:
5311             unlock_user(argptr, guest_data, 0);
5312             ret = -TARGET_EINVAL;
5313             goto out;
5314         }
5315         unlock_user(argptr, guest_data, guest_data_size);
5316 
5317         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5318         if (!argptr) {
5319             ret = -TARGET_EFAULT;
5320             goto out;
5321         }
5322         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5323         unlock_user(argptr, arg, target_size);
5324     }
5325 out:
5326     g_free(big_buf);
5327     return ret;
5328 }
5329 
5330 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5331                                int cmd, abi_long arg)
5332 {
5333     void *argptr;
5334     int target_size;
5335     const argtype *arg_type = ie->arg_type;
5336     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5337     abi_long ret;
5338 
5339     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5340     struct blkpg_partition host_part;
5341 
5342     /* Read and convert blkpg */
5343     arg_type++;
5344     target_size = thunk_type_size(arg_type, 0);
5345     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5346     if (!argptr) {
5347         ret = -TARGET_EFAULT;
5348         goto out;
5349     }
5350     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5351     unlock_user(argptr, arg, 0);
5352 
5353     switch (host_blkpg->op) {
5354     case BLKPG_ADD_PARTITION:
5355     case BLKPG_DEL_PARTITION:
5356         /* payload is struct blkpg_partition */
5357         break;
5358     default:
5359         /* Unknown opcode */
5360         ret = -TARGET_EINVAL;
5361         goto out;
5362     }
5363 
5364     /* Read and convert blkpg->data */
5365     arg = (abi_long)(uintptr_t)host_blkpg->data;
5366     target_size = thunk_type_size(part_arg_type, 0);
5367     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5368     if (!argptr) {
5369         ret = -TARGET_EFAULT;
5370         goto out;
5371     }
5372     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5373     unlock_user(argptr, arg, 0);
5374 
5375     /* Swizzle the data pointer to our local copy and call! */
5376     host_blkpg->data = &host_part;
5377     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5378 
5379 out:
5380     return ret;
5381 }
5382 
5383 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                 int fd, int cmd, abi_long arg)
5385 {
5386     const argtype *arg_type = ie->arg_type;
5387     const StructEntry *se;
5388     const argtype *field_types;
5389     const int *dst_offsets, *src_offsets;
5390     int target_size;
5391     void *argptr;
5392     abi_ulong *target_rt_dev_ptr = NULL;
5393     unsigned long *host_rt_dev_ptr = NULL;
5394     abi_long ret;
5395     int i;
5396 
5397     assert(ie->access == IOC_W);
5398     assert(*arg_type == TYPE_PTR);
5399     arg_type++;
5400     assert(*arg_type == TYPE_STRUCT);
5401     target_size = thunk_type_size(arg_type, 0);
5402     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403     if (!argptr) {
5404         return -TARGET_EFAULT;
5405     }
5406     arg_type++;
5407     assert(*arg_type == (int)STRUCT_rtentry);
5408     se = struct_entries + *arg_type++;
5409     assert(se->convert[0] == NULL);
5410     /* convert struct here to be able to catch rt_dev string */
5411     field_types = se->field_types;
5412     dst_offsets = se->field_offsets[THUNK_HOST];
5413     src_offsets = se->field_offsets[THUNK_TARGET];
5414     for (i = 0; i < se->nb_fields; i++) {
5415         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5416             assert(*field_types == TYPE_PTRVOID);
5417             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5418             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5419             if (*target_rt_dev_ptr != 0) {
5420                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5421                                                   tswapal(*target_rt_dev_ptr));
5422                 if (!*host_rt_dev_ptr) {
5423                     unlock_user(argptr, arg, 0);
5424                     return -TARGET_EFAULT;
5425                 }
5426             } else {
5427                 *host_rt_dev_ptr = 0;
5428             }
5429             field_types++;
5430             continue;
5431         }
5432         field_types = thunk_convert(buf_temp + dst_offsets[i],
5433                                     argptr + src_offsets[i],
5434                                     field_types, THUNK_HOST);
5435     }
5436     unlock_user(argptr, arg, 0);
5437 
5438     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5439 
5440     assert(host_rt_dev_ptr != NULL);
5441     assert(target_rt_dev_ptr != NULL);
5442     if (*host_rt_dev_ptr != 0) {
5443         unlock_user((void *)*host_rt_dev_ptr,
5444                     *target_rt_dev_ptr, 0);
5445     }
5446     return ret;
5447 }
5448 
5449 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                      int fd, int cmd, abi_long arg)
5451 {
5452     int sig = target_to_host_signal(arg);
5453     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5454 }
5455 
5456 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                     int fd, int cmd, abi_long arg)
5458 {
5459     struct timeval tv;
5460     abi_long ret;
5461 
5462     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5463     if (is_error(ret)) {
5464         return ret;
5465     }
5466 
5467     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5468         if (copy_to_user_timeval(arg, &tv)) {
5469             return -TARGET_EFAULT;
5470         }
5471     } else {
5472         if (copy_to_user_timeval64(arg, &tv)) {
5473             return -TARGET_EFAULT;
5474         }
5475     }
5476 
5477     return ret;
5478 }
5479 
5480 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5481                                       int fd, int cmd, abi_long arg)
5482 {
5483     struct timespec ts;
5484     abi_long ret;
5485 
5486     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5487     if (is_error(ret)) {
5488         return ret;
5489     }
5490 
5491     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5492         if (host_to_target_timespec(arg, &ts)) {
5493             return -TARGET_EFAULT;
5494         }
5495     } else{
5496         if (host_to_target_timespec64(arg, &ts)) {
5497             return -TARGET_EFAULT;
5498         }
5499     }
5500 
5501     return ret;
5502 }
5503 
5504 #ifdef TIOCGPTPEER
5505 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5510 }
5511 #endif
5512 
5513 #ifdef HAVE_DRM_H
5514 
5515 static void unlock_drm_version(struct drm_version *host_ver,
5516                                struct target_drm_version *target_ver,
5517                                bool copy)
5518 {
5519     unlock_user(host_ver->name, target_ver->name,
5520                                 copy ? host_ver->name_len : 0);
5521     unlock_user(host_ver->date, target_ver->date,
5522                                 copy ? host_ver->date_len : 0);
5523     unlock_user(host_ver->desc, target_ver->desc,
5524                                 copy ? host_ver->desc_len : 0);
5525 }
5526 
5527 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5528                                           struct target_drm_version *target_ver)
5529 {
5530     memset(host_ver, 0, sizeof(*host_ver));
5531 
5532     __get_user(host_ver->name_len, &target_ver->name_len);
5533     if (host_ver->name_len) {
5534         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5535                                    target_ver->name_len, 0);
5536         if (!host_ver->name) {
5537             return -EFAULT;
5538         }
5539     }
5540 
5541     __get_user(host_ver->date_len, &target_ver->date_len);
5542     if (host_ver->date_len) {
5543         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5544                                    target_ver->date_len, 0);
5545         if (!host_ver->date) {
5546             goto err;
5547         }
5548     }
5549 
5550     __get_user(host_ver->desc_len, &target_ver->desc_len);
5551     if (host_ver->desc_len) {
5552         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5553                                    target_ver->desc_len, 0);
5554         if (!host_ver->desc) {
5555             goto err;
5556         }
5557     }
5558 
5559     return 0;
5560 err:
5561     unlock_drm_version(host_ver, target_ver, false);
5562     return -EFAULT;
5563 }
5564 
5565 static inline void host_to_target_drmversion(
5566                                           struct target_drm_version *target_ver,
5567                                           struct drm_version *host_ver)
5568 {
5569     __put_user(host_ver->version_major, &target_ver->version_major);
5570     __put_user(host_ver->version_minor, &target_ver->version_minor);
5571     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5572     __put_user(host_ver->name_len, &target_ver->name_len);
5573     __put_user(host_ver->date_len, &target_ver->date_len);
5574     __put_user(host_ver->desc_len, &target_ver->desc_len);
5575     unlock_drm_version(host_ver, target_ver, true);
5576 }
5577 
5578 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5579                              int fd, int cmd, abi_long arg)
5580 {
5581     struct drm_version *ver;
5582     struct target_drm_version *target_ver;
5583     abi_long ret;
5584 
5585     switch (ie->host_cmd) {
5586     case DRM_IOCTL_VERSION:
5587         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5588             return -TARGET_EFAULT;
5589         }
5590         ver = (struct drm_version *)buf_temp;
5591         ret = target_to_host_drmversion(ver, target_ver);
5592         if (!is_error(ret)) {
5593             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5594             if (is_error(ret)) {
5595                 unlock_drm_version(ver, target_ver, false);
5596             } else {
5597                 host_to_target_drmversion(target_ver, ver);
5598             }
5599         }
5600         unlock_user_struct(target_ver, arg, 0);
5601         return ret;
5602     }
5603     return -TARGET_ENOSYS;
5604 }
5605 
5606 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5607                                            struct drm_i915_getparam *gparam,
5608                                            int fd, abi_long arg)
5609 {
5610     abi_long ret;
5611     int value;
5612     struct target_drm_i915_getparam *target_gparam;
5613 
5614     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5615         return -TARGET_EFAULT;
5616     }
5617 
5618     __get_user(gparam->param, &target_gparam->param);
5619     gparam->value = &value;
5620     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5621     put_user_s32(value, target_gparam->value);
5622 
5623     unlock_user_struct(target_gparam, arg, 0);
5624     return ret;
5625 }
5626 
5627 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5628                                   int fd, int cmd, abi_long arg)
5629 {
5630     switch (ie->host_cmd) {
5631     case DRM_IOCTL_I915_GETPARAM:
5632         return do_ioctl_drm_i915_getparam(ie,
5633                                           (struct drm_i915_getparam *)buf_temp,
5634                                           fd, arg);
5635     default:
5636         return -TARGET_ENOSYS;
5637     }
5638 }
5639 
5640 #endif
5641 
5642 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                                         int fd, int cmd, abi_long arg)
5644 {
5645     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5646     struct tun_filter *target_filter;
5647     char *target_addr;
5648 
5649     assert(ie->access == IOC_W);
5650 
5651     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5652     if (!target_filter) {
5653         return -TARGET_EFAULT;
5654     }
5655     filter->flags = tswap16(target_filter->flags);
5656     filter->count = tswap16(target_filter->count);
5657     unlock_user(target_filter, arg, 0);
5658 
5659     if (filter->count) {
5660         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5661             MAX_STRUCT_SIZE) {
5662             return -TARGET_EFAULT;
5663         }
5664 
5665         target_addr = lock_user(VERIFY_READ,
5666                                 arg + offsetof(struct tun_filter, addr),
5667                                 filter->count * ETH_ALEN, 1);
5668         if (!target_addr) {
5669             return -TARGET_EFAULT;
5670         }
5671         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5672         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5673     }
5674 
5675     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5676 }
5677 
5678 IOCTLEntry ioctl_entries[] = {
5679 #define IOCTL(cmd, access, ...) \
5680     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5681 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5682     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5683 #define IOCTL_IGNORE(cmd) \
5684     { TARGET_ ## cmd, 0, #cmd },
5685 #include "ioctls.h"
5686     { 0, 0, },
5687 };
5688 
5689 /* ??? Implement proper locking for ioctls.  */
5690 /* do_ioctl() Must return target values and target errnos. */
5691 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5692 {
5693     const IOCTLEntry *ie;
5694     const argtype *arg_type;
5695     abi_long ret;
5696     uint8_t buf_temp[MAX_STRUCT_SIZE];
5697     int target_size;
5698     void *argptr;
5699 
5700     ie = ioctl_entries;
5701     for(;;) {
5702         if (ie->target_cmd == 0) {
5703             qemu_log_mask(
5704                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5705             return -TARGET_ENOSYS;
5706         }
5707         if (ie->target_cmd == cmd)
5708             break;
5709         ie++;
5710     }
5711     arg_type = ie->arg_type;
5712     if (ie->do_ioctl) {
5713         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714     } else if (!ie->host_cmd) {
5715         /* Some architectures define BSD ioctls in their headers
5716            that are not implemented in Linux.  */
5717         return -TARGET_ENOSYS;
5718     }
5719 
5720     switch(arg_type[0]) {
5721     case TYPE_NULL:
5722         /* no argument */
5723         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5724         break;
5725     case TYPE_PTRVOID:
5726     case TYPE_INT:
5727     case TYPE_LONG:
5728     case TYPE_ULONG:
5729         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5730         break;
5731     case TYPE_PTR:
5732         arg_type++;
5733         target_size = thunk_type_size(arg_type, 0);
5734         switch(ie->access) {
5735         case IOC_R:
5736             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5737             if (!is_error(ret)) {
5738                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5739                 if (!argptr)
5740                     return -TARGET_EFAULT;
5741                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5742                 unlock_user(argptr, arg, target_size);
5743             }
5744             break;
5745         case IOC_W:
5746             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5747             if (!argptr)
5748                 return -TARGET_EFAULT;
5749             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5750             unlock_user(argptr, arg, 0);
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             break;
5753         default:
5754         case IOC_RW:
5755             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5756             if (!argptr)
5757                 return -TARGET_EFAULT;
5758             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5759             unlock_user(argptr, arg, 0);
5760             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5761             if (!is_error(ret)) {
5762                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5763                 if (!argptr)
5764                     return -TARGET_EFAULT;
5765                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5766                 unlock_user(argptr, arg, target_size);
5767             }
5768             break;
5769         }
5770         break;
5771     default:
5772         qemu_log_mask(LOG_UNIMP,
5773                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5774                       (long)cmd, arg_type[0]);
5775         ret = -TARGET_ENOSYS;
5776         break;
5777     }
5778     return ret;
5779 }
5780 
5781 static const bitmask_transtbl iflag_tbl[] = {
5782         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5783         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5784         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5785         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5786         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5787         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5788         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5789         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5790         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5791         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5792         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5793         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5794         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5795         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5796         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5797         { 0, 0, 0, 0 }
5798 };
5799 
5800 static const bitmask_transtbl oflag_tbl[] = {
5801 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5802 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5803 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5804 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5805 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5806 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5807 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5808 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5809 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5810 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5811 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5812 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5813 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5814 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5815 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5816 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5817 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5818 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5819 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5820 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5821 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5822 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5823 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5824 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5825 	{ 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl cflag_tbl[] = {
5829 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5830 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5831 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5832 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5833 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5834 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5835 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5836 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5837 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5838 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5839 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5840 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5841 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5842 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5843 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5844 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5845 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5846 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5847 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5848 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5849 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5850 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5851 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5852 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5853 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5854 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5855 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5856 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5857 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5858 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5859 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5860 	{ 0, 0, 0, 0 }
5861 };
5862 
5863 static const bitmask_transtbl lflag_tbl[] = {
5864   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5865   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5866   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5867   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5868   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5869   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5870   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5871   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5872   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5873   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5874   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5875   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5876   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5877   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5878   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5879   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5880   { 0, 0, 0, 0 }
5881 };
5882 
5883 static void target_to_host_termios (void *dst, const void *src)
5884 {
5885     struct host_termios *host = dst;
5886     const struct target_termios *target = src;
5887 
5888     host->c_iflag =
5889         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5890     host->c_oflag =
5891         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5892     host->c_cflag =
5893         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5894     host->c_lflag =
5895         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5896     host->c_line = target->c_line;
5897 
5898     memset(host->c_cc, 0, sizeof(host->c_cc));
5899     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5900     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5901     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5902     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5903     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5904     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5905     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5906     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5907     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5908     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5909     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5910     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5911     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5912     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5913     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5914     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5915     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5916 }
5917 
5918 static void host_to_target_termios (void *dst, const void *src)
5919 {
5920     struct target_termios *target = dst;
5921     const struct host_termios *host = src;
5922 
5923     target->c_iflag =
5924         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5925     target->c_oflag =
5926         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5927     target->c_cflag =
5928         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5929     target->c_lflag =
5930         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5931     target->c_line = host->c_line;
5932 
5933     memset(target->c_cc, 0, sizeof(target->c_cc));
5934     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5935     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5936     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5937     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5938     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5939     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5940     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5941     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5942     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5943     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5944     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5945     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5946     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5947     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5948     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5949     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5950     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5951 }
5952 
5953 static const StructEntry struct_termios_def = {
5954     .convert = { host_to_target_termios, target_to_host_termios },
5955     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5956     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5957     .print = print_termios,
5958 };
5959 
5960 static const bitmask_transtbl mmap_flags_tbl[] = {
5961     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5962     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5963     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5964     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5965       MAP_ANONYMOUS, MAP_ANONYMOUS },
5966     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5967       MAP_GROWSDOWN, MAP_GROWSDOWN },
5968     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5969       MAP_DENYWRITE, MAP_DENYWRITE },
5970     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5971       MAP_EXECUTABLE, MAP_EXECUTABLE },
5972     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5973     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5974       MAP_NORESERVE, MAP_NORESERVE },
5975     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5976     /* MAP_STACK had been ignored by the kernel for quite some time.
5977        Recognize it for the target insofar as we do not want to pass
5978        it through to the host.  */
5979     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5980     { 0, 0, 0, 0 }
5981 };
5982 
5983 /*
5984  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5985  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5986  */
5987 #if defined(TARGET_I386)
5988 
5989 /* NOTE: there is really one LDT for all the threads */
5990 static uint8_t *ldt_table;
5991 
5992 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5993 {
5994     int size;
5995     void *p;
5996 
5997     if (!ldt_table)
5998         return 0;
5999     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6000     if (size > bytecount)
6001         size = bytecount;
6002     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6003     if (!p)
6004         return -TARGET_EFAULT;
6005     /* ??? Should this by byteswapped?  */
6006     memcpy(p, ldt_table, size);
6007     unlock_user(p, ptr, size);
6008     return size;
6009 }
6010 
6011 /* XXX: add locking support */
6012 static abi_long write_ldt(CPUX86State *env,
6013                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6014 {
6015     struct target_modify_ldt_ldt_s ldt_info;
6016     struct target_modify_ldt_ldt_s *target_ldt_info;
6017     int seg_32bit, contents, read_exec_only, limit_in_pages;
6018     int seg_not_present, useable, lm;
6019     uint32_t *lp, entry_1, entry_2;
6020 
6021     if (bytecount != sizeof(ldt_info))
6022         return -TARGET_EINVAL;
6023     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6024         return -TARGET_EFAULT;
6025     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6026     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6027     ldt_info.limit = tswap32(target_ldt_info->limit);
6028     ldt_info.flags = tswap32(target_ldt_info->flags);
6029     unlock_user_struct(target_ldt_info, ptr, 0);
6030 
6031     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6032         return -TARGET_EINVAL;
6033     seg_32bit = ldt_info.flags & 1;
6034     contents = (ldt_info.flags >> 1) & 3;
6035     read_exec_only = (ldt_info.flags >> 3) & 1;
6036     limit_in_pages = (ldt_info.flags >> 4) & 1;
6037     seg_not_present = (ldt_info.flags >> 5) & 1;
6038     useable = (ldt_info.flags >> 6) & 1;
6039 #ifdef TARGET_ABI32
6040     lm = 0;
6041 #else
6042     lm = (ldt_info.flags >> 7) & 1;
6043 #endif
6044     if (contents == 3) {
6045         if (oldmode)
6046             return -TARGET_EINVAL;
6047         if (seg_not_present == 0)
6048             return -TARGET_EINVAL;
6049     }
6050     /* allocate the LDT */
6051     if (!ldt_table) {
6052         env->ldt.base = target_mmap(0,
6053                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6054                                     PROT_READ|PROT_WRITE,
6055                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6056         if (env->ldt.base == -1)
6057             return -TARGET_ENOMEM;
6058         memset(g2h_untagged(env->ldt.base), 0,
6059                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6060         env->ldt.limit = 0xffff;
6061         ldt_table = g2h_untagged(env->ldt.base);
6062     }
6063 
6064     /* NOTE: same code as Linux kernel */
6065     /* Allow LDTs to be cleared by the user. */
6066     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6067         if (oldmode ||
6068             (contents == 0		&&
6069              read_exec_only == 1	&&
6070              seg_32bit == 0		&&
6071              limit_in_pages == 0	&&
6072              seg_not_present == 1	&&
6073              useable == 0 )) {
6074             entry_1 = 0;
6075             entry_2 = 0;
6076             goto install;
6077         }
6078     }
6079 
6080     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6081         (ldt_info.limit & 0x0ffff);
6082     entry_2 = (ldt_info.base_addr & 0xff000000) |
6083         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6084         (ldt_info.limit & 0xf0000) |
6085         ((read_exec_only ^ 1) << 9) |
6086         (contents << 10) |
6087         ((seg_not_present ^ 1) << 15) |
6088         (seg_32bit << 22) |
6089         (limit_in_pages << 23) |
6090         (lm << 21) |
6091         0x7000;
6092     if (!oldmode)
6093         entry_2 |= (useable << 20);
6094 
6095     /* Install the new entry ...  */
6096 install:
6097     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6098     lp[0] = tswap32(entry_1);
6099     lp[1] = tswap32(entry_2);
6100     return 0;
6101 }
6102 
6103 /* specific and weird i386 syscalls */
6104 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6105                               unsigned long bytecount)
6106 {
6107     abi_long ret;
6108 
6109     switch (func) {
6110     case 0:
6111         ret = read_ldt(ptr, bytecount);
6112         break;
6113     case 1:
6114         ret = write_ldt(env, ptr, bytecount, 1);
6115         break;
6116     case 0x11:
6117         ret = write_ldt(env, ptr, bytecount, 0);
6118         break;
6119     default:
6120         ret = -TARGET_ENOSYS;
6121         break;
6122     }
6123     return ret;
6124 }
6125 
6126 #if defined(TARGET_ABI32)
6127 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6128 {
6129     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6130     struct target_modify_ldt_ldt_s ldt_info;
6131     struct target_modify_ldt_ldt_s *target_ldt_info;
6132     int seg_32bit, contents, read_exec_only, limit_in_pages;
6133     int seg_not_present, useable, lm;
6134     uint32_t *lp, entry_1, entry_2;
6135     int i;
6136 
6137     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6138     if (!target_ldt_info)
6139         return -TARGET_EFAULT;
6140     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6141     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6142     ldt_info.limit = tswap32(target_ldt_info->limit);
6143     ldt_info.flags = tswap32(target_ldt_info->flags);
6144     if (ldt_info.entry_number == -1) {
6145         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6146             if (gdt_table[i] == 0) {
6147                 ldt_info.entry_number = i;
6148                 target_ldt_info->entry_number = tswap32(i);
6149                 break;
6150             }
6151         }
6152     }
6153     unlock_user_struct(target_ldt_info, ptr, 1);
6154 
6155     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6156         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6157            return -TARGET_EINVAL;
6158     seg_32bit = ldt_info.flags & 1;
6159     contents = (ldt_info.flags >> 1) & 3;
6160     read_exec_only = (ldt_info.flags >> 3) & 1;
6161     limit_in_pages = (ldt_info.flags >> 4) & 1;
6162     seg_not_present = (ldt_info.flags >> 5) & 1;
6163     useable = (ldt_info.flags >> 6) & 1;
6164 #ifdef TARGET_ABI32
6165     lm = 0;
6166 #else
6167     lm = (ldt_info.flags >> 7) & 1;
6168 #endif
6169 
6170     if (contents == 3) {
6171         if (seg_not_present == 0)
6172             return -TARGET_EINVAL;
6173     }
6174 
6175     /* NOTE: same code as Linux kernel */
6176     /* Allow LDTs to be cleared by the user. */
6177     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6178         if ((contents == 0             &&
6179              read_exec_only == 1       &&
6180              seg_32bit == 0            &&
6181              limit_in_pages == 0       &&
6182              seg_not_present == 1      &&
6183              useable == 0 )) {
6184             entry_1 = 0;
6185             entry_2 = 0;
6186             goto install;
6187         }
6188     }
6189 
6190     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6191         (ldt_info.limit & 0x0ffff);
6192     entry_2 = (ldt_info.base_addr & 0xff000000) |
6193         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6194         (ldt_info.limit & 0xf0000) |
6195         ((read_exec_only ^ 1) << 9) |
6196         (contents << 10) |
6197         ((seg_not_present ^ 1) << 15) |
6198         (seg_32bit << 22) |
6199         (limit_in_pages << 23) |
6200         (useable << 20) |
6201         (lm << 21) |
6202         0x7000;
6203 
6204     /* Install the new entry ...  */
6205 install:
6206     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6207     lp[0] = tswap32(entry_1);
6208     lp[1] = tswap32(entry_2);
6209     return 0;
6210 }
6211 
6212 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6213 {
6214     struct target_modify_ldt_ldt_s *target_ldt_info;
6215     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6216     uint32_t base_addr, limit, flags;
6217     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6218     int seg_not_present, useable, lm;
6219     uint32_t *lp, entry_1, entry_2;
6220 
6221     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6222     if (!target_ldt_info)
6223         return -TARGET_EFAULT;
6224     idx = tswap32(target_ldt_info->entry_number);
6225     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6226         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6227         unlock_user_struct(target_ldt_info, ptr, 1);
6228         return -TARGET_EINVAL;
6229     }
6230     lp = (uint32_t *)(gdt_table + idx);
6231     entry_1 = tswap32(lp[0]);
6232     entry_2 = tswap32(lp[1]);
6233 
6234     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6235     contents = (entry_2 >> 10) & 3;
6236     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6237     seg_32bit = (entry_2 >> 22) & 1;
6238     limit_in_pages = (entry_2 >> 23) & 1;
6239     useable = (entry_2 >> 20) & 1;
6240 #ifdef TARGET_ABI32
6241     lm = 0;
6242 #else
6243     lm = (entry_2 >> 21) & 1;
6244 #endif
6245     flags = (seg_32bit << 0) | (contents << 1) |
6246         (read_exec_only << 3) | (limit_in_pages << 4) |
6247         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6248     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6249     base_addr = (entry_1 >> 16) |
6250         (entry_2 & 0xff000000) |
6251         ((entry_2 & 0xff) << 16);
6252     target_ldt_info->base_addr = tswapal(base_addr);
6253     target_ldt_info->limit = tswap32(limit);
6254     target_ldt_info->flags = tswap32(flags);
6255     unlock_user_struct(target_ldt_info, ptr, 1);
6256     return 0;
6257 }
6258 
6259 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6260 {
6261     return -TARGET_ENOSYS;
6262 }
6263 #else
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     abi_long ret = 0;
6267     abi_ulong val;
6268     int idx;
6269 
6270     switch(code) {
6271     case TARGET_ARCH_SET_GS:
6272     case TARGET_ARCH_SET_FS:
6273         if (code == TARGET_ARCH_SET_GS)
6274             idx = R_GS;
6275         else
6276             idx = R_FS;
6277         cpu_x86_load_seg(env, idx, 0);
6278         env->segs[idx].base = addr;
6279         break;
6280     case TARGET_ARCH_GET_GS:
6281     case TARGET_ARCH_GET_FS:
6282         if (code == TARGET_ARCH_GET_GS)
6283             idx = R_GS;
6284         else
6285             idx = R_FS;
6286         val = env->segs[idx].base;
6287         if (put_user(val, addr, abi_ulong))
6288             ret = -TARGET_EFAULT;
6289         break;
6290     default:
6291         ret = -TARGET_EINVAL;
6292         break;
6293     }
6294     return ret;
6295 }
6296 #endif /* defined(TARGET_ABI32 */
6297 #endif /* defined(TARGET_I386) */
6298 
6299 /*
6300  * These constants are generic.  Supply any that are missing from the host.
6301  */
6302 #ifndef PR_SET_NAME
6303 # define PR_SET_NAME    15
6304 # define PR_GET_NAME    16
6305 #endif
6306 #ifndef PR_SET_FP_MODE
6307 # define PR_SET_FP_MODE 45
6308 # define PR_GET_FP_MODE 46
6309 # define PR_FP_MODE_FR   (1 << 0)
6310 # define PR_FP_MODE_FRE  (1 << 1)
6311 #endif
6312 #ifndef PR_SVE_SET_VL
6313 # define PR_SVE_SET_VL  50
6314 # define PR_SVE_GET_VL  51
6315 # define PR_SVE_VL_LEN_MASK  0xffff
6316 # define PR_SVE_VL_INHERIT   (1 << 17)
6317 #endif
6318 #ifndef PR_PAC_RESET_KEYS
6319 # define PR_PAC_RESET_KEYS  54
6320 # define PR_PAC_APIAKEY   (1 << 0)
6321 # define PR_PAC_APIBKEY   (1 << 1)
6322 # define PR_PAC_APDAKEY   (1 << 2)
6323 # define PR_PAC_APDBKEY   (1 << 3)
6324 # define PR_PAC_APGAKEY   (1 << 4)
6325 #endif
6326 #ifndef PR_SET_TAGGED_ADDR_CTRL
6327 # define PR_SET_TAGGED_ADDR_CTRL 55
6328 # define PR_GET_TAGGED_ADDR_CTRL 56
6329 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6330 #endif
6331 #ifndef PR_MTE_TCF_SHIFT
6332 # define PR_MTE_TCF_SHIFT       1
6333 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6337 # define PR_MTE_TAG_SHIFT       3
6338 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6339 #endif
6340 
6341 #include "target_prctl.h"
6342 
6343 static abi_long do_prctl_inval0(CPUArchState *env)
6344 {
6345     return -TARGET_EINVAL;
6346 }
6347 
6348 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6349 {
6350     return -TARGET_EINVAL;
6351 }
6352 
6353 #ifndef do_prctl_get_fp_mode
6354 #define do_prctl_get_fp_mode do_prctl_inval0
6355 #endif
6356 #ifndef do_prctl_set_fp_mode
6357 #define do_prctl_set_fp_mode do_prctl_inval1
6358 #endif
6359 #ifndef do_prctl_get_vl
6360 #define do_prctl_get_vl do_prctl_inval0
6361 #endif
6362 #ifndef do_prctl_set_vl
6363 #define do_prctl_set_vl do_prctl_inval1
6364 #endif
6365 #ifndef do_prctl_reset_keys
6366 #define do_prctl_reset_keys do_prctl_inval1
6367 #endif
6368 #ifndef do_prctl_set_tagged_addr_ctrl
6369 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6370 #endif
6371 #ifndef do_prctl_get_tagged_addr_ctrl
6372 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6373 #endif
6374 
6375 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6376                          abi_long arg3, abi_long arg4, abi_long arg5)
6377 {
6378     abi_long ret;
6379 
6380     switch (option) {
6381     case PR_GET_PDEATHSIG:
6382         {
6383             int deathsig;
6384             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6385                                   arg3, arg4, arg5));
6386             if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
6387                 return -TARGET_EFAULT;
6388             }
6389             return ret;
6390         }
6391     case PR_GET_NAME:
6392         {
6393             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6394             if (!name) {
6395                 return -TARGET_EFAULT;
6396             }
6397             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6398                                   arg3, arg4, arg5));
6399             unlock_user(name, arg2, 16);
6400             return ret;
6401         }
6402     case PR_SET_NAME:
6403         {
6404             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6405             if (!name) {
6406                 return -TARGET_EFAULT;
6407             }
6408             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6409                                   arg3, arg4, arg5));
6410             unlock_user(name, arg2, 0);
6411             return ret;
6412         }
6413     case PR_GET_FP_MODE:
6414         return do_prctl_get_fp_mode(env);
6415     case PR_SET_FP_MODE:
6416         return do_prctl_set_fp_mode(env, arg2);
6417     case PR_SVE_GET_VL:
6418         return do_prctl_get_vl(env);
6419     case PR_SVE_SET_VL:
6420         return do_prctl_set_vl(env, arg2);
6421     case PR_PAC_RESET_KEYS:
6422         if (arg3 || arg4 || arg5) {
6423             return -TARGET_EINVAL;
6424         }
6425         return do_prctl_reset_keys(env, arg2);
6426     case PR_SET_TAGGED_ADDR_CTRL:
6427         if (arg3 || arg4 || arg5) {
6428             return -TARGET_EINVAL;
6429         }
6430         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6431     case PR_GET_TAGGED_ADDR_CTRL:
6432         if (arg2 || arg3 || arg4 || arg5) {
6433             return -TARGET_EINVAL;
6434         }
6435         return do_prctl_get_tagged_addr_ctrl(env);
6436     case PR_GET_SECCOMP:
6437     case PR_SET_SECCOMP:
6438         /* Disable seccomp to prevent the target disabling syscalls we need. */
6439         return -TARGET_EINVAL;
6440     default:
6441         /* Most prctl options have no pointer arguments */
6442         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6443     }
6444 }
6445 
6446 #define NEW_STACK_SIZE 0x40000
6447 
6448 
6449 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6450 typedef struct {
6451     CPUArchState *env;
6452     pthread_mutex_t mutex;
6453     pthread_cond_t cond;
6454     pthread_t thread;
6455     uint32_t tid;
6456     abi_ulong child_tidptr;
6457     abi_ulong parent_tidptr;
6458     sigset_t sigmask;
6459 } new_thread_info;
6460 
6461 static void *clone_func(void *arg)
6462 {
6463     new_thread_info *info = arg;
6464     CPUArchState *env;
6465     CPUState *cpu;
6466     TaskState *ts;
6467 
6468     rcu_register_thread();
6469     tcg_register_thread();
6470     env = info->env;
6471     cpu = env_cpu(env);
6472     thread_cpu = cpu;
6473     ts = (TaskState *)cpu->opaque;
6474     info->tid = sys_gettid();
6475     task_settid(ts);
6476     if (info->child_tidptr)
6477         put_user_u32(info->tid, info->child_tidptr);
6478     if (info->parent_tidptr)
6479         put_user_u32(info->tid, info->parent_tidptr);
6480     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6481     /* Enable signals.  */
6482     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6483     /* Signal to the parent that we're ready.  */
6484     pthread_mutex_lock(&info->mutex);
6485     pthread_cond_broadcast(&info->cond);
6486     pthread_mutex_unlock(&info->mutex);
6487     /* Wait until the parent has finished initializing the tls state.  */
6488     pthread_mutex_lock(&clone_lock);
6489     pthread_mutex_unlock(&clone_lock);
6490     cpu_loop(env);
6491     /* never exits */
6492     return NULL;
6493 }
6494 
6495 /* do_fork() Must return host values and target errnos (unlike most
6496    do_*() functions). */
6497 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6498                    abi_ulong parent_tidptr, target_ulong newtls,
6499                    abi_ulong child_tidptr)
6500 {
6501     CPUState *cpu = env_cpu(env);
6502     int ret;
6503     TaskState *ts;
6504     CPUState *new_cpu;
6505     CPUArchState *new_env;
6506     sigset_t sigmask;
6507 
6508     flags &= ~CLONE_IGNORED_FLAGS;
6509 
6510     /* Emulate vfork() with fork() */
6511     if (flags & CLONE_VFORK)
6512         flags &= ~(CLONE_VFORK | CLONE_VM);
6513 
6514     if (flags & CLONE_VM) {
6515         TaskState *parent_ts = (TaskState *)cpu->opaque;
6516         new_thread_info info;
6517         pthread_attr_t attr;
6518 
6519         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6520             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6521             return -TARGET_EINVAL;
6522         }
6523 
6524         ts = g_new0(TaskState, 1);
6525         init_task_state(ts);
6526 
6527         /* Grab a mutex so that thread setup appears atomic.  */
6528         pthread_mutex_lock(&clone_lock);
6529 
6530         /*
6531          * If this is our first additional thread, we need to ensure we
6532          * generate code for parallel execution and flush old translations.
6533          * Do this now so that the copy gets CF_PARALLEL too.
6534          */
6535         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6536             cpu->tcg_cflags |= CF_PARALLEL;
6537             tb_flush(cpu);
6538         }
6539 
6540         /* we create a new CPU instance. */
6541         new_env = cpu_copy(env);
6542         /* Init regs that differ from the parent.  */
6543         cpu_clone_regs_child(new_env, newsp, flags);
6544         cpu_clone_regs_parent(env, flags);
6545         new_cpu = env_cpu(new_env);
6546         new_cpu->opaque = ts;
6547         ts->bprm = parent_ts->bprm;
6548         ts->info = parent_ts->info;
6549         ts->signal_mask = parent_ts->signal_mask;
6550 
6551         if (flags & CLONE_CHILD_CLEARTID) {
6552             ts->child_tidptr = child_tidptr;
6553         }
6554 
6555         if (flags & CLONE_SETTLS) {
6556             cpu_set_tls (new_env, newtls);
6557         }
6558 
6559         memset(&info, 0, sizeof(info));
6560         pthread_mutex_init(&info.mutex, NULL);
6561         pthread_mutex_lock(&info.mutex);
6562         pthread_cond_init(&info.cond, NULL);
6563         info.env = new_env;
6564         if (flags & CLONE_CHILD_SETTID) {
6565             info.child_tidptr = child_tidptr;
6566         }
6567         if (flags & CLONE_PARENT_SETTID) {
6568             info.parent_tidptr = parent_tidptr;
6569         }
6570 
6571         ret = pthread_attr_init(&attr);
6572         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6573         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6574         /* It is not safe to deliver signals until the child has finished
6575            initializing, so temporarily block all signals.  */
6576         sigfillset(&sigmask);
6577         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6578         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6579 
6580         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6581         /* TODO: Free new CPU state if thread creation failed.  */
6582 
6583         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6584         pthread_attr_destroy(&attr);
6585         if (ret == 0) {
6586             /* Wait for the child to initialize.  */
6587             pthread_cond_wait(&info.cond, &info.mutex);
6588             ret = info.tid;
6589         } else {
6590             ret = -1;
6591         }
6592         pthread_mutex_unlock(&info.mutex);
6593         pthread_cond_destroy(&info.cond);
6594         pthread_mutex_destroy(&info.mutex);
6595         pthread_mutex_unlock(&clone_lock);
6596     } else {
6597         /* if no CLONE_VM, we consider it is a fork */
6598         if (flags & CLONE_INVALID_FORK_FLAGS) {
6599             return -TARGET_EINVAL;
6600         }
6601 
6602         /* We can't support custom termination signals */
6603         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6604             return -TARGET_EINVAL;
6605         }
6606 
6607         if (block_signals()) {
6608             return -QEMU_ERESTARTSYS;
6609         }
6610 
6611         fork_start();
6612         ret = fork();
6613         if (ret == 0) {
6614             /* Child Process.  */
6615             cpu_clone_regs_child(env, newsp, flags);
6616             fork_end(1);
6617             /* There is a race condition here.  The parent process could
6618                theoretically read the TID in the child process before the child
6619                tid is set.  This would require using either ptrace
6620                (not implemented) or having *_tidptr to point at a shared memory
6621                mapping.  We can't repeat the spinlock hack used above because
6622                the child process gets its own copy of the lock.  */
6623             if (flags & CLONE_CHILD_SETTID)
6624                 put_user_u32(sys_gettid(), child_tidptr);
6625             if (flags & CLONE_PARENT_SETTID)
6626                 put_user_u32(sys_gettid(), parent_tidptr);
6627             ts = (TaskState *)cpu->opaque;
6628             if (flags & CLONE_SETTLS)
6629                 cpu_set_tls (env, newtls);
6630             if (flags & CLONE_CHILD_CLEARTID)
6631                 ts->child_tidptr = child_tidptr;
6632         } else {
6633             cpu_clone_regs_parent(env, flags);
6634             fork_end(0);
6635         }
6636     }
6637     return ret;
6638 }
6639 
6640 /* warning : doesn't handle linux specific flags... */
6641 static int target_to_host_fcntl_cmd(int cmd)
6642 {
6643     int ret;
6644 
6645     switch(cmd) {
6646     case TARGET_F_DUPFD:
6647     case TARGET_F_GETFD:
6648     case TARGET_F_SETFD:
6649     case TARGET_F_GETFL:
6650     case TARGET_F_SETFL:
6651     case TARGET_F_OFD_GETLK:
6652     case TARGET_F_OFD_SETLK:
6653     case TARGET_F_OFD_SETLKW:
6654         ret = cmd;
6655         break;
6656     case TARGET_F_GETLK:
6657         ret = F_GETLK64;
6658         break;
6659     case TARGET_F_SETLK:
6660         ret = F_SETLK64;
6661         break;
6662     case TARGET_F_SETLKW:
6663         ret = F_SETLKW64;
6664         break;
6665     case TARGET_F_GETOWN:
6666         ret = F_GETOWN;
6667         break;
6668     case TARGET_F_SETOWN:
6669         ret = F_SETOWN;
6670         break;
6671     case TARGET_F_GETSIG:
6672         ret = F_GETSIG;
6673         break;
6674     case TARGET_F_SETSIG:
6675         ret = F_SETSIG;
6676         break;
6677 #if TARGET_ABI_BITS == 32
6678     case TARGET_F_GETLK64:
6679         ret = F_GETLK64;
6680         break;
6681     case TARGET_F_SETLK64:
6682         ret = F_SETLK64;
6683         break;
6684     case TARGET_F_SETLKW64:
6685         ret = F_SETLKW64;
6686         break;
6687 #endif
6688     case TARGET_F_SETLEASE:
6689         ret = F_SETLEASE;
6690         break;
6691     case TARGET_F_GETLEASE:
6692         ret = F_GETLEASE;
6693         break;
6694 #ifdef F_DUPFD_CLOEXEC
6695     case TARGET_F_DUPFD_CLOEXEC:
6696         ret = F_DUPFD_CLOEXEC;
6697         break;
6698 #endif
6699     case TARGET_F_NOTIFY:
6700         ret = F_NOTIFY;
6701         break;
6702 #ifdef F_GETOWN_EX
6703     case TARGET_F_GETOWN_EX:
6704         ret = F_GETOWN_EX;
6705         break;
6706 #endif
6707 #ifdef F_SETOWN_EX
6708     case TARGET_F_SETOWN_EX:
6709         ret = F_SETOWN_EX;
6710         break;
6711 #endif
6712 #ifdef F_SETPIPE_SZ
6713     case TARGET_F_SETPIPE_SZ:
6714         ret = F_SETPIPE_SZ;
6715         break;
6716     case TARGET_F_GETPIPE_SZ:
6717         ret = F_GETPIPE_SZ;
6718         break;
6719 #endif
6720 #ifdef F_ADD_SEALS
6721     case TARGET_F_ADD_SEALS:
6722         ret = F_ADD_SEALS;
6723         break;
6724     case TARGET_F_GET_SEALS:
6725         ret = F_GET_SEALS;
6726         break;
6727 #endif
6728     default:
6729         ret = -TARGET_EINVAL;
6730         break;
6731     }
6732 
6733 #if defined(__powerpc64__)
6734     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6735      * is not supported by kernel. The glibc fcntl call actually adjusts
6736      * them to 5, 6 and 7 before making the syscall(). Since we make the
6737      * syscall directly, adjust to what is supported by the kernel.
6738      */
6739     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6740         ret -= F_GETLK64 - 5;
6741     }
6742 #endif
6743 
6744     return ret;
6745 }
6746 
6747 #define FLOCK_TRANSTBL \
6748     switch (type) { \
6749     TRANSTBL_CONVERT(F_RDLCK); \
6750     TRANSTBL_CONVERT(F_WRLCK); \
6751     TRANSTBL_CONVERT(F_UNLCK); \
6752     }
6753 
6754 static int target_to_host_flock(int type)
6755 {
6756 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6757     FLOCK_TRANSTBL
6758 #undef  TRANSTBL_CONVERT
6759     return -TARGET_EINVAL;
6760 }
6761 
6762 static int host_to_target_flock(int type)
6763 {
6764 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6765     FLOCK_TRANSTBL
6766 #undef  TRANSTBL_CONVERT
6767     /* if we don't know how to convert the value coming
6768      * from the host we copy to the target field as-is
6769      */
6770     return type;
6771 }
6772 
6773 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6774                                             abi_ulong target_flock_addr)
6775 {
6776     struct target_flock *target_fl;
6777     int l_type;
6778 
6779     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6780         return -TARGET_EFAULT;
6781     }
6782 
6783     __get_user(l_type, &target_fl->l_type);
6784     l_type = target_to_host_flock(l_type);
6785     if (l_type < 0) {
6786         return l_type;
6787     }
6788     fl->l_type = l_type;
6789     __get_user(fl->l_whence, &target_fl->l_whence);
6790     __get_user(fl->l_start, &target_fl->l_start);
6791     __get_user(fl->l_len, &target_fl->l_len);
6792     __get_user(fl->l_pid, &target_fl->l_pid);
6793     unlock_user_struct(target_fl, target_flock_addr, 0);
6794     return 0;
6795 }
6796 
6797 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6798                                           const struct flock64 *fl)
6799 {
6800     struct target_flock *target_fl;
6801     short l_type;
6802 
6803     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6804         return -TARGET_EFAULT;
6805     }
6806 
6807     l_type = host_to_target_flock(fl->l_type);
6808     __put_user(l_type, &target_fl->l_type);
6809     __put_user(fl->l_whence, &target_fl->l_whence);
6810     __put_user(fl->l_start, &target_fl->l_start);
6811     __put_user(fl->l_len, &target_fl->l_len);
6812     __put_user(fl->l_pid, &target_fl->l_pid);
6813     unlock_user_struct(target_fl, target_flock_addr, 1);
6814     return 0;
6815 }
6816 
6817 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6818 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6819 
6820 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6821 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6822                                                    abi_ulong target_flock_addr)
6823 {
6824     struct target_oabi_flock64 *target_fl;
6825     int l_type;
6826 
6827     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6828         return -TARGET_EFAULT;
6829     }
6830 
6831     __get_user(l_type, &target_fl->l_type);
6832     l_type = target_to_host_flock(l_type);
6833     if (l_type < 0) {
6834         return l_type;
6835     }
6836     fl->l_type = l_type;
6837     __get_user(fl->l_whence, &target_fl->l_whence);
6838     __get_user(fl->l_start, &target_fl->l_start);
6839     __get_user(fl->l_len, &target_fl->l_len);
6840     __get_user(fl->l_pid, &target_fl->l_pid);
6841     unlock_user_struct(target_fl, target_flock_addr, 0);
6842     return 0;
6843 }
6844 
6845 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6846                                                  const struct flock64 *fl)
6847 {
6848     struct target_oabi_flock64 *target_fl;
6849     short l_type;
6850 
6851     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6852         return -TARGET_EFAULT;
6853     }
6854 
6855     l_type = host_to_target_flock(fl->l_type);
6856     __put_user(l_type, &target_fl->l_type);
6857     __put_user(fl->l_whence, &target_fl->l_whence);
6858     __put_user(fl->l_start, &target_fl->l_start);
6859     __put_user(fl->l_len, &target_fl->l_len);
6860     __put_user(fl->l_pid, &target_fl->l_pid);
6861     unlock_user_struct(target_fl, target_flock_addr, 1);
6862     return 0;
6863 }
6864 #endif
6865 
6866 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6867                                               abi_ulong target_flock_addr)
6868 {
6869     struct target_flock64 *target_fl;
6870     int l_type;
6871 
6872     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6873         return -TARGET_EFAULT;
6874     }
6875 
6876     __get_user(l_type, &target_fl->l_type);
6877     l_type = target_to_host_flock(l_type);
6878     if (l_type < 0) {
6879         return l_type;
6880     }
6881     fl->l_type = l_type;
6882     __get_user(fl->l_whence, &target_fl->l_whence);
6883     __get_user(fl->l_start, &target_fl->l_start);
6884     __get_user(fl->l_len, &target_fl->l_len);
6885     __get_user(fl->l_pid, &target_fl->l_pid);
6886     unlock_user_struct(target_fl, target_flock_addr, 0);
6887     return 0;
6888 }
6889 
6890 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6891                                             const struct flock64 *fl)
6892 {
6893     struct target_flock64 *target_fl;
6894     short l_type;
6895 
6896     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6897         return -TARGET_EFAULT;
6898     }
6899 
6900     l_type = host_to_target_flock(fl->l_type);
6901     __put_user(l_type, &target_fl->l_type);
6902     __put_user(fl->l_whence, &target_fl->l_whence);
6903     __put_user(fl->l_start, &target_fl->l_start);
6904     __put_user(fl->l_len, &target_fl->l_len);
6905     __put_user(fl->l_pid, &target_fl->l_pid);
6906     unlock_user_struct(target_fl, target_flock_addr, 1);
6907     return 0;
6908 }
6909 
6910 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6911 {
6912     struct flock64 fl64;
6913 #ifdef F_GETOWN_EX
6914     struct f_owner_ex fox;
6915     struct target_f_owner_ex *target_fox;
6916 #endif
6917     abi_long ret;
6918     int host_cmd = target_to_host_fcntl_cmd(cmd);
6919 
6920     if (host_cmd == -TARGET_EINVAL)
6921 	    return host_cmd;
6922 
6923     switch(cmd) {
6924     case TARGET_F_GETLK:
6925         ret = copy_from_user_flock(&fl64, arg);
6926         if (ret) {
6927             return ret;
6928         }
6929         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6930         if (ret == 0) {
6931             ret = copy_to_user_flock(arg, &fl64);
6932         }
6933         break;
6934 
6935     case TARGET_F_SETLK:
6936     case TARGET_F_SETLKW:
6937         ret = copy_from_user_flock(&fl64, arg);
6938         if (ret) {
6939             return ret;
6940         }
6941         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6942         break;
6943 
6944     case TARGET_F_GETLK64:
6945     case TARGET_F_OFD_GETLK:
6946         ret = copy_from_user_flock64(&fl64, arg);
6947         if (ret) {
6948             return ret;
6949         }
6950         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6951         if (ret == 0) {
6952             ret = copy_to_user_flock64(arg, &fl64);
6953         }
6954         break;
6955     case TARGET_F_SETLK64:
6956     case TARGET_F_SETLKW64:
6957     case TARGET_F_OFD_SETLK:
6958     case TARGET_F_OFD_SETLKW:
6959         ret = copy_from_user_flock64(&fl64, arg);
6960         if (ret) {
6961             return ret;
6962         }
6963         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6964         break;
6965 
6966     case TARGET_F_GETFL:
6967         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6968         if (ret >= 0) {
6969             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6970         }
6971         break;
6972 
6973     case TARGET_F_SETFL:
6974         ret = get_errno(safe_fcntl(fd, host_cmd,
6975                                    target_to_host_bitmask(arg,
6976                                                           fcntl_flags_tbl)));
6977         break;
6978 
6979 #ifdef F_GETOWN_EX
6980     case TARGET_F_GETOWN_EX:
6981         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6982         if (ret >= 0) {
6983             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6984                 return -TARGET_EFAULT;
6985             target_fox->type = tswap32(fox.type);
6986             target_fox->pid = tswap32(fox.pid);
6987             unlock_user_struct(target_fox, arg, 1);
6988         }
6989         break;
6990 #endif
6991 
6992 #ifdef F_SETOWN_EX
6993     case TARGET_F_SETOWN_EX:
6994         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6995             return -TARGET_EFAULT;
6996         fox.type = tswap32(target_fox->type);
6997         fox.pid = tswap32(target_fox->pid);
6998         unlock_user_struct(target_fox, arg, 0);
6999         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7000         break;
7001 #endif
7002 
7003     case TARGET_F_SETSIG:
7004         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7005         break;
7006 
7007     case TARGET_F_GETSIG:
7008         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7009         break;
7010 
7011     case TARGET_F_SETOWN:
7012     case TARGET_F_GETOWN:
7013     case TARGET_F_SETLEASE:
7014     case TARGET_F_GETLEASE:
7015     case TARGET_F_SETPIPE_SZ:
7016     case TARGET_F_GETPIPE_SZ:
7017     case TARGET_F_ADD_SEALS:
7018     case TARGET_F_GET_SEALS:
7019         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7020         break;
7021 
7022     default:
7023         ret = get_errno(safe_fcntl(fd, cmd, arg));
7024         break;
7025     }
7026     return ret;
7027 }
7028 
7029 #ifdef USE_UID16
7030 
7031 static inline int high2lowuid(int uid)
7032 {
7033     if (uid > 65535)
7034         return 65534;
7035     else
7036         return uid;
7037 }
7038 
7039 static inline int high2lowgid(int gid)
7040 {
7041     if (gid > 65535)
7042         return 65534;
7043     else
7044         return gid;
7045 }
7046 
7047 static inline int low2highuid(int uid)
7048 {
7049     if ((int16_t)uid == -1)
7050         return -1;
7051     else
7052         return uid;
7053 }
7054 
7055 static inline int low2highgid(int gid)
7056 {
7057     if ((int16_t)gid == -1)
7058         return -1;
7059     else
7060         return gid;
7061 }
7062 static inline int tswapid(int id)
7063 {
7064     return tswap16(id);
7065 }
7066 
7067 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7068 
7069 #else /* !USE_UID16 */
7070 static inline int high2lowuid(int uid)
7071 {
7072     return uid;
7073 }
7074 static inline int high2lowgid(int gid)
7075 {
7076     return gid;
7077 }
7078 static inline int low2highuid(int uid)
7079 {
7080     return uid;
7081 }
7082 static inline int low2highgid(int gid)
7083 {
7084     return gid;
7085 }
7086 static inline int tswapid(int id)
7087 {
7088     return tswap32(id);
7089 }
7090 
7091 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7092 
7093 #endif /* USE_UID16 */
7094 
7095 /* We must do direct syscalls for setting UID/GID, because we want to
7096  * implement the Linux system call semantics of "change only for this thread",
7097  * not the libc/POSIX semantics of "change for all threads in process".
7098  * (See http://ewontfix.com/17/ for more details.)
7099  * We use the 32-bit version of the syscalls if present; if it is not
7100  * then either the host architecture supports 32-bit UIDs natively with
7101  * the standard syscall, or the 16-bit UID is the best we can do.
7102  */
7103 #ifdef __NR_setuid32
7104 #define __NR_sys_setuid __NR_setuid32
7105 #else
7106 #define __NR_sys_setuid __NR_setuid
7107 #endif
7108 #ifdef __NR_setgid32
7109 #define __NR_sys_setgid __NR_setgid32
7110 #else
7111 #define __NR_sys_setgid __NR_setgid
7112 #endif
7113 #ifdef __NR_setresuid32
7114 #define __NR_sys_setresuid __NR_setresuid32
7115 #else
7116 #define __NR_sys_setresuid __NR_setresuid
7117 #endif
7118 #ifdef __NR_setresgid32
7119 #define __NR_sys_setresgid __NR_setresgid32
7120 #else
7121 #define __NR_sys_setresgid __NR_setresgid
7122 #endif
7123 
7124 _syscall1(int, sys_setuid, uid_t, uid)
7125 _syscall1(int, sys_setgid, gid_t, gid)
7126 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7127 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7128 
7129 void syscall_init(void)
7130 {
7131     IOCTLEntry *ie;
7132     const argtype *arg_type;
7133     int size;
7134 
7135     thunk_init(STRUCT_MAX);
7136 
7137 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7138 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7139 #include "syscall_types.h"
7140 #undef STRUCT
7141 #undef STRUCT_SPECIAL
7142 
7143     /* we patch the ioctl size if necessary. We rely on the fact that
7144        no ioctl has all the bits at '1' in the size field */
7145     ie = ioctl_entries;
7146     while (ie->target_cmd != 0) {
7147         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7148             TARGET_IOC_SIZEMASK) {
7149             arg_type = ie->arg_type;
7150             if (arg_type[0] != TYPE_PTR) {
7151                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7152                         ie->target_cmd);
7153                 exit(1);
7154             }
7155             arg_type++;
7156             size = thunk_type_size(arg_type, 0);
7157             ie->target_cmd = (ie->target_cmd &
7158                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7159                 (size << TARGET_IOC_SIZESHIFT);
7160         }
7161 
7162         /* automatic consistency check if same arch */
7163 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7164     (defined(__x86_64__) && defined(TARGET_X86_64))
7165         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7166             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7167                     ie->name, ie->target_cmd, ie->host_cmd);
7168         }
7169 #endif
7170         ie++;
7171     }
7172 }
7173 
7174 #ifdef TARGET_NR_truncate64
7175 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7176                                          abi_long arg2,
7177                                          abi_long arg3,
7178                                          abi_long arg4)
7179 {
7180     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7181         arg2 = arg3;
7182         arg3 = arg4;
7183     }
7184     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7185 }
7186 #endif
7187 
7188 #ifdef TARGET_NR_ftruncate64
7189 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7190                                           abi_long arg2,
7191                                           abi_long arg3,
7192                                           abi_long arg4)
7193 {
7194     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7195         arg2 = arg3;
7196         arg3 = arg4;
7197     }
7198     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7199 }
7200 #endif
7201 
7202 #if defined(TARGET_NR_timer_settime) || \
7203     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7204 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7205                                                  abi_ulong target_addr)
7206 {
7207     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7208                                 offsetof(struct target_itimerspec,
7209                                          it_interval)) ||
7210         target_to_host_timespec(&host_its->it_value, target_addr +
7211                                 offsetof(struct target_itimerspec,
7212                                          it_value))) {
7213         return -TARGET_EFAULT;
7214     }
7215 
7216     return 0;
7217 }
7218 #endif
7219 
7220 #if defined(TARGET_NR_timer_settime64) || \
7221     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7222 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7223                                                    abi_ulong target_addr)
7224 {
7225     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7226                                   offsetof(struct target__kernel_itimerspec,
7227                                            it_interval)) ||
7228         target_to_host_timespec64(&host_its->it_value, target_addr +
7229                                   offsetof(struct target__kernel_itimerspec,
7230                                            it_value))) {
7231         return -TARGET_EFAULT;
7232     }
7233 
7234     return 0;
7235 }
7236 #endif
7237 
7238 #if ((defined(TARGET_NR_timerfd_gettime) || \
7239       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7240       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7241 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7242                                                  struct itimerspec *host_its)
7243 {
7244     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7245                                                        it_interval),
7246                                 &host_its->it_interval) ||
7247         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7248                                                        it_value),
7249                                 &host_its->it_value)) {
7250         return -TARGET_EFAULT;
7251     }
7252     return 0;
7253 }
7254 #endif
7255 
7256 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7257       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7258       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7259 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7260                                                    struct itimerspec *host_its)
7261 {
7262     if (host_to_target_timespec64(target_addr +
7263                                   offsetof(struct target__kernel_itimerspec,
7264                                            it_interval),
7265                                   &host_its->it_interval) ||
7266         host_to_target_timespec64(target_addr +
7267                                   offsetof(struct target__kernel_itimerspec,
7268                                            it_value),
7269                                   &host_its->it_value)) {
7270         return -TARGET_EFAULT;
7271     }
7272     return 0;
7273 }
7274 #endif
7275 
7276 #if defined(TARGET_NR_adjtimex) || \
7277     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7278 static inline abi_long target_to_host_timex(struct timex *host_tx,
7279                                             abi_long target_addr)
7280 {
7281     struct target_timex *target_tx;
7282 
7283     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7284         return -TARGET_EFAULT;
7285     }
7286 
7287     __get_user(host_tx->modes, &target_tx->modes);
7288     __get_user(host_tx->offset, &target_tx->offset);
7289     __get_user(host_tx->freq, &target_tx->freq);
7290     __get_user(host_tx->maxerror, &target_tx->maxerror);
7291     __get_user(host_tx->esterror, &target_tx->esterror);
7292     __get_user(host_tx->status, &target_tx->status);
7293     __get_user(host_tx->constant, &target_tx->constant);
7294     __get_user(host_tx->precision, &target_tx->precision);
7295     __get_user(host_tx->tolerance, &target_tx->tolerance);
7296     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7297     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7298     __get_user(host_tx->tick, &target_tx->tick);
7299     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7300     __get_user(host_tx->jitter, &target_tx->jitter);
7301     __get_user(host_tx->shift, &target_tx->shift);
7302     __get_user(host_tx->stabil, &target_tx->stabil);
7303     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7304     __get_user(host_tx->calcnt, &target_tx->calcnt);
7305     __get_user(host_tx->errcnt, &target_tx->errcnt);
7306     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7307     __get_user(host_tx->tai, &target_tx->tai);
7308 
7309     unlock_user_struct(target_tx, target_addr, 0);
7310     return 0;
7311 }
7312 
7313 static inline abi_long host_to_target_timex(abi_long target_addr,
7314                                             struct timex *host_tx)
7315 {
7316     struct target_timex *target_tx;
7317 
7318     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     __put_user(host_tx->modes, &target_tx->modes);
7323     __put_user(host_tx->offset, &target_tx->offset);
7324     __put_user(host_tx->freq, &target_tx->freq);
7325     __put_user(host_tx->maxerror, &target_tx->maxerror);
7326     __put_user(host_tx->esterror, &target_tx->esterror);
7327     __put_user(host_tx->status, &target_tx->status);
7328     __put_user(host_tx->constant, &target_tx->constant);
7329     __put_user(host_tx->precision, &target_tx->precision);
7330     __put_user(host_tx->tolerance, &target_tx->tolerance);
7331     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7332     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7333     __put_user(host_tx->tick, &target_tx->tick);
7334     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7335     __put_user(host_tx->jitter, &target_tx->jitter);
7336     __put_user(host_tx->shift, &target_tx->shift);
7337     __put_user(host_tx->stabil, &target_tx->stabil);
7338     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7339     __put_user(host_tx->calcnt, &target_tx->calcnt);
7340     __put_user(host_tx->errcnt, &target_tx->errcnt);
7341     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7342     __put_user(host_tx->tai, &target_tx->tai);
7343 
7344     unlock_user_struct(target_tx, target_addr, 1);
7345     return 0;
7346 }
7347 #endif
7348 
7349 
7350 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7351 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7352                                               abi_long target_addr)
7353 {
7354     struct target__kernel_timex *target_tx;
7355 
7356     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7357                                  offsetof(struct target__kernel_timex,
7358                                           time))) {
7359         return -TARGET_EFAULT;
7360     }
7361 
7362     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7363         return -TARGET_EFAULT;
7364     }
7365 
7366     __get_user(host_tx->modes, &target_tx->modes);
7367     __get_user(host_tx->offset, &target_tx->offset);
7368     __get_user(host_tx->freq, &target_tx->freq);
7369     __get_user(host_tx->maxerror, &target_tx->maxerror);
7370     __get_user(host_tx->esterror, &target_tx->esterror);
7371     __get_user(host_tx->status, &target_tx->status);
7372     __get_user(host_tx->constant, &target_tx->constant);
7373     __get_user(host_tx->precision, &target_tx->precision);
7374     __get_user(host_tx->tolerance, &target_tx->tolerance);
7375     __get_user(host_tx->tick, &target_tx->tick);
7376     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7377     __get_user(host_tx->jitter, &target_tx->jitter);
7378     __get_user(host_tx->shift, &target_tx->shift);
7379     __get_user(host_tx->stabil, &target_tx->stabil);
7380     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7381     __get_user(host_tx->calcnt, &target_tx->calcnt);
7382     __get_user(host_tx->errcnt, &target_tx->errcnt);
7383     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7384     __get_user(host_tx->tai, &target_tx->tai);
7385 
7386     unlock_user_struct(target_tx, target_addr, 0);
7387     return 0;
7388 }
7389 
7390 static inline abi_long host_to_target_timex64(abi_long target_addr,
7391                                               struct timex *host_tx)
7392 {
7393     struct target__kernel_timex *target_tx;
7394 
7395    if (copy_to_user_timeval64(target_addr +
7396                               offsetof(struct target__kernel_timex, time),
7397                               &host_tx->time)) {
7398         return -TARGET_EFAULT;
7399     }
7400 
7401     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7402         return -TARGET_EFAULT;
7403     }
7404 
7405     __put_user(host_tx->modes, &target_tx->modes);
7406     __put_user(host_tx->offset, &target_tx->offset);
7407     __put_user(host_tx->freq, &target_tx->freq);
7408     __put_user(host_tx->maxerror, &target_tx->maxerror);
7409     __put_user(host_tx->esterror, &target_tx->esterror);
7410     __put_user(host_tx->status, &target_tx->status);
7411     __put_user(host_tx->constant, &target_tx->constant);
7412     __put_user(host_tx->precision, &target_tx->precision);
7413     __put_user(host_tx->tolerance, &target_tx->tolerance);
7414     __put_user(host_tx->tick, &target_tx->tick);
7415     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7416     __put_user(host_tx->jitter, &target_tx->jitter);
7417     __put_user(host_tx->shift, &target_tx->shift);
7418     __put_user(host_tx->stabil, &target_tx->stabil);
7419     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7420     __put_user(host_tx->calcnt, &target_tx->calcnt);
7421     __put_user(host_tx->errcnt, &target_tx->errcnt);
7422     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7423     __put_user(host_tx->tai, &target_tx->tai);
7424 
7425     unlock_user_struct(target_tx, target_addr, 1);
7426     return 0;
7427 }
7428 #endif
7429 
7430 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7431 #define sigev_notify_thread_id _sigev_un._tid
7432 #endif
7433 
7434 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7435                                                abi_ulong target_addr)
7436 {
7437     struct target_sigevent *target_sevp;
7438 
7439     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7440         return -TARGET_EFAULT;
7441     }
7442 
7443     /* This union is awkward on 64 bit systems because it has a 32 bit
7444      * integer and a pointer in it; we follow the conversion approach
7445      * used for handling sigval types in signal.c so the guest should get
7446      * the correct value back even if we did a 64 bit byteswap and it's
7447      * using the 32 bit integer.
7448      */
7449     host_sevp->sigev_value.sival_ptr =
7450         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7451     host_sevp->sigev_signo =
7452         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7453     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7454     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7455 
7456     unlock_user_struct(target_sevp, target_addr, 1);
7457     return 0;
7458 }
7459 
7460 #if defined(TARGET_NR_mlockall)
7461 static inline int target_to_host_mlockall_arg(int arg)
7462 {
7463     int result = 0;
7464 
7465     if (arg & TARGET_MCL_CURRENT) {
7466         result |= MCL_CURRENT;
7467     }
7468     if (arg & TARGET_MCL_FUTURE) {
7469         result |= MCL_FUTURE;
7470     }
7471 #ifdef MCL_ONFAULT
7472     if (arg & TARGET_MCL_ONFAULT) {
7473         result |= MCL_ONFAULT;
7474     }
7475 #endif
7476 
7477     return result;
7478 }
7479 #endif
7480 
7481 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7482      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7483      defined(TARGET_NR_newfstatat))
7484 static inline abi_long host_to_target_stat64(void *cpu_env,
7485                                              abi_ulong target_addr,
7486                                              struct stat *host_st)
7487 {
7488 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7489     if (((CPUARMState *)cpu_env)->eabi) {
7490         struct target_eabi_stat64 *target_st;
7491 
7492         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7493             return -TARGET_EFAULT;
7494         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7495         __put_user(host_st->st_dev, &target_st->st_dev);
7496         __put_user(host_st->st_ino, &target_st->st_ino);
7497 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7498         __put_user(host_st->st_ino, &target_st->__st_ino);
7499 #endif
7500         __put_user(host_st->st_mode, &target_st->st_mode);
7501         __put_user(host_st->st_nlink, &target_st->st_nlink);
7502         __put_user(host_st->st_uid, &target_st->st_uid);
7503         __put_user(host_st->st_gid, &target_st->st_gid);
7504         __put_user(host_st->st_rdev, &target_st->st_rdev);
7505         __put_user(host_st->st_size, &target_st->st_size);
7506         __put_user(host_st->st_blksize, &target_st->st_blksize);
7507         __put_user(host_st->st_blocks, &target_st->st_blocks);
7508         __put_user(host_st->st_atime, &target_st->target_st_atime);
7509         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7510         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7511 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7512         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7513         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7514         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7515 #endif
7516         unlock_user_struct(target_st, target_addr, 1);
7517     } else
7518 #endif
7519     {
7520 #if defined(TARGET_HAS_STRUCT_STAT64)
7521         struct target_stat64 *target_st;
7522 #else
7523         struct target_stat *target_st;
7524 #endif
7525 
7526         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7527             return -TARGET_EFAULT;
7528         memset(target_st, 0, sizeof(*target_st));
7529         __put_user(host_st->st_dev, &target_st->st_dev);
7530         __put_user(host_st->st_ino, &target_st->st_ino);
7531 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7532         __put_user(host_st->st_ino, &target_st->__st_ino);
7533 #endif
7534         __put_user(host_st->st_mode, &target_st->st_mode);
7535         __put_user(host_st->st_nlink, &target_st->st_nlink);
7536         __put_user(host_st->st_uid, &target_st->st_uid);
7537         __put_user(host_st->st_gid, &target_st->st_gid);
7538         __put_user(host_st->st_rdev, &target_st->st_rdev);
7539         /* XXX: better use of kernel struct */
7540         __put_user(host_st->st_size, &target_st->st_size);
7541         __put_user(host_st->st_blksize, &target_st->st_blksize);
7542         __put_user(host_st->st_blocks, &target_st->st_blocks);
7543         __put_user(host_st->st_atime, &target_st->target_st_atime);
7544         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7545         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7546 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7547         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7548         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7549         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7550 #endif
7551         unlock_user_struct(target_st, target_addr, 1);
7552     }
7553 
7554     return 0;
7555 }
7556 #endif
7557 
7558 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7559 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7560                                             abi_ulong target_addr)
7561 {
7562     struct target_statx *target_stx;
7563 
7564     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7565         return -TARGET_EFAULT;
7566     }
7567     memset(target_stx, 0, sizeof(*target_stx));
7568 
7569     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7570     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7571     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7572     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7573     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7574     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7575     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7576     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7577     __put_user(host_stx->stx_size, &target_stx->stx_size);
7578     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7579     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7580     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7581     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7582     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7583     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7584     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7585     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7586     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7587     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7588     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7589     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7590     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7591     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7592 
7593     unlock_user_struct(target_stx, target_addr, 1);
7594 
7595     return 0;
7596 }
7597 #endif
7598 
7599 static int do_sys_futex(int *uaddr, int op, int val,
7600                          const struct timespec *timeout, int *uaddr2,
7601                          int val3)
7602 {
7603 #if HOST_LONG_BITS == 64
7604 #if defined(__NR_futex)
7605     /* always a 64-bit time_t, it doesn't define _time64 version  */
7606     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7607 
7608 #endif
7609 #else /* HOST_LONG_BITS == 64 */
7610 #if defined(__NR_futex_time64)
7611     if (sizeof(timeout->tv_sec) == 8) {
7612         /* _time64 function on 32bit arch */
7613         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7614     }
7615 #endif
7616 #if defined(__NR_futex)
7617     /* old function on 32bit arch */
7618     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7619 #endif
7620 #endif /* HOST_LONG_BITS == 64 */
7621     g_assert_not_reached();
7622 }
7623 
7624 static int do_safe_futex(int *uaddr, int op, int val,
7625                          const struct timespec *timeout, int *uaddr2,
7626                          int val3)
7627 {
7628 #if HOST_LONG_BITS == 64
7629 #if defined(__NR_futex)
7630     /* always a 64-bit time_t, it doesn't define _time64 version  */
7631     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7632 #endif
7633 #else /* HOST_LONG_BITS == 64 */
7634 #if defined(__NR_futex_time64)
7635     if (sizeof(timeout->tv_sec) == 8) {
7636         /* _time64 function on 32bit arch */
7637         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7638                                            val3));
7639     }
7640 #endif
7641 #if defined(__NR_futex)
7642     /* old function on 32bit arch */
7643     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7644 #endif
7645 #endif /* HOST_LONG_BITS == 64 */
7646     return -TARGET_ENOSYS;
7647 }
7648 
7649 /* ??? Using host futex calls even when target atomic operations
7650    are not really atomic probably breaks things.  However implementing
7651    futexes locally would make futexes shared between multiple processes
7652    tricky.  However they're probably useless because guest atomic
7653    operations won't work either.  */
7654 #if defined(TARGET_NR_futex)
7655 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7656                     target_ulong timeout, target_ulong uaddr2, int val3)
7657 {
7658     struct timespec ts, *pts;
7659     int base_op;
7660 
7661     /* ??? We assume FUTEX_* constants are the same on both host
7662        and target.  */
7663 #ifdef FUTEX_CMD_MASK
7664     base_op = op & FUTEX_CMD_MASK;
7665 #else
7666     base_op = op;
7667 #endif
7668     switch (base_op) {
7669     case FUTEX_WAIT:
7670     case FUTEX_WAIT_BITSET:
7671         if (timeout) {
7672             pts = &ts;
7673             target_to_host_timespec(pts, timeout);
7674         } else {
7675             pts = NULL;
7676         }
7677         return do_safe_futex(g2h(cpu, uaddr),
7678                              op, tswap32(val), pts, NULL, val3);
7679     case FUTEX_WAKE:
7680         return do_safe_futex(g2h(cpu, uaddr),
7681                              op, val, NULL, NULL, 0);
7682     case FUTEX_FD:
7683         return do_safe_futex(g2h(cpu, uaddr),
7684                              op, val, NULL, NULL, 0);
7685     case FUTEX_REQUEUE:
7686     case FUTEX_CMP_REQUEUE:
7687     case FUTEX_WAKE_OP:
7688         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7689            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7690            But the prototype takes a `struct timespec *'; insert casts
7691            to satisfy the compiler.  We do not need to tswap TIMEOUT
7692            since it's not compared to guest memory.  */
7693         pts = (struct timespec *)(uintptr_t) timeout;
7694         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7695                              (base_op == FUTEX_CMP_REQUEUE
7696                               ? tswap32(val3) : val3));
7697     default:
7698         return -TARGET_ENOSYS;
7699     }
7700 }
7701 #endif
7702 
7703 #if defined(TARGET_NR_futex_time64)
7704 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7705                            int val, target_ulong timeout,
7706                            target_ulong uaddr2, int val3)
7707 {
7708     struct timespec ts, *pts;
7709     int base_op;
7710 
7711     /* ??? We assume FUTEX_* constants are the same on both host
7712        and target.  */
7713 #ifdef FUTEX_CMD_MASK
7714     base_op = op & FUTEX_CMD_MASK;
7715 #else
7716     base_op = op;
7717 #endif
7718     switch (base_op) {
7719     case FUTEX_WAIT:
7720     case FUTEX_WAIT_BITSET:
7721         if (timeout) {
7722             pts = &ts;
7723             if (target_to_host_timespec64(pts, timeout)) {
7724                 return -TARGET_EFAULT;
7725             }
7726         } else {
7727             pts = NULL;
7728         }
7729         return do_safe_futex(g2h(cpu, uaddr), op,
7730                              tswap32(val), pts, NULL, val3);
7731     case FUTEX_WAKE:
7732         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7733     case FUTEX_FD:
7734         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7735     case FUTEX_REQUEUE:
7736     case FUTEX_CMP_REQUEUE:
7737     case FUTEX_WAKE_OP:
7738         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7739            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7740            But the prototype takes a `struct timespec *'; insert casts
7741            to satisfy the compiler.  We do not need to tswap TIMEOUT
7742            since it's not compared to guest memory.  */
7743         pts = (struct timespec *)(uintptr_t) timeout;
7744         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7745                              (base_op == FUTEX_CMP_REQUEUE
7746                               ? tswap32(val3) : val3));
7747     default:
7748         return -TARGET_ENOSYS;
7749     }
7750 }
7751 #endif
7752 
7753 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7754 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7755                                      abi_long handle, abi_long mount_id,
7756                                      abi_long flags)
7757 {
7758     struct file_handle *target_fh;
7759     struct file_handle *fh;
7760     int mid = 0;
7761     abi_long ret;
7762     char *name;
7763     unsigned int size, total_size;
7764 
7765     if (get_user_s32(size, handle)) {
7766         return -TARGET_EFAULT;
7767     }
7768 
7769     name = lock_user_string(pathname);
7770     if (!name) {
7771         return -TARGET_EFAULT;
7772     }
7773 
7774     total_size = sizeof(struct file_handle) + size;
7775     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7776     if (!target_fh) {
7777         unlock_user(name, pathname, 0);
7778         return -TARGET_EFAULT;
7779     }
7780 
7781     fh = g_malloc0(total_size);
7782     fh->handle_bytes = size;
7783 
7784     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7785     unlock_user(name, pathname, 0);
7786 
7787     /* man name_to_handle_at(2):
7788      * Other than the use of the handle_bytes field, the caller should treat
7789      * the file_handle structure as an opaque data type
7790      */
7791 
7792     memcpy(target_fh, fh, total_size);
7793     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7794     target_fh->handle_type = tswap32(fh->handle_type);
7795     g_free(fh);
7796     unlock_user(target_fh, handle, total_size);
7797 
7798     if (put_user_s32(mid, mount_id)) {
7799         return -TARGET_EFAULT;
7800     }
7801 
7802     return ret;
7803 
7804 }
7805 #endif
7806 
7807 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7808 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7809                                      abi_long flags)
7810 {
7811     struct file_handle *target_fh;
7812     struct file_handle *fh;
7813     unsigned int size, total_size;
7814     abi_long ret;
7815 
7816     if (get_user_s32(size, handle)) {
7817         return -TARGET_EFAULT;
7818     }
7819 
7820     total_size = sizeof(struct file_handle) + size;
7821     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7822     if (!target_fh) {
7823         return -TARGET_EFAULT;
7824     }
7825 
7826     fh = g_memdup(target_fh, total_size);
7827     fh->handle_bytes = size;
7828     fh->handle_type = tswap32(target_fh->handle_type);
7829 
7830     ret = get_errno(open_by_handle_at(mount_fd, fh,
7831                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7832 
7833     g_free(fh);
7834 
7835     unlock_user(target_fh, handle, total_size);
7836 
7837     return ret;
7838 }
7839 #endif
7840 
7841 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7842 
7843 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7844 {
7845     int host_flags;
7846     target_sigset_t *target_mask;
7847     sigset_t host_mask;
7848     abi_long ret;
7849 
7850     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7851         return -TARGET_EINVAL;
7852     }
7853     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7854         return -TARGET_EFAULT;
7855     }
7856 
7857     target_to_host_sigset(&host_mask, target_mask);
7858 
7859     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7860 
7861     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7862     if (ret >= 0) {
7863         fd_trans_register(ret, &target_signalfd_trans);
7864     }
7865 
7866     unlock_user_struct(target_mask, mask, 0);
7867 
7868     return ret;
7869 }
7870 #endif
7871 
7872 /* Map host to target signal numbers for the wait family of syscalls.
7873    Assume all other status bits are the same.  */
7874 int host_to_target_waitstatus(int status)
7875 {
7876     if (WIFSIGNALED(status)) {
7877         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7878     }
7879     if (WIFSTOPPED(status)) {
7880         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7881                | (status & 0xff);
7882     }
7883     return status;
7884 }
7885 
7886 static int open_self_cmdline(void *cpu_env, int fd)
7887 {
7888     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7889     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7890     int i;
7891 
7892     for (i = 0; i < bprm->argc; i++) {
7893         size_t len = strlen(bprm->argv[i]) + 1;
7894 
7895         if (write(fd, bprm->argv[i], len) != len) {
7896             return -1;
7897         }
7898     }
7899 
7900     return 0;
7901 }
7902 
7903 static int open_self_maps(void *cpu_env, int fd)
7904 {
7905     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7906     TaskState *ts = cpu->opaque;
7907     GSList *map_info = read_self_maps();
7908     GSList *s;
7909     int count;
7910 
7911     for (s = map_info; s; s = g_slist_next(s)) {
7912         MapInfo *e = (MapInfo *) s->data;
7913 
7914         if (h2g_valid(e->start)) {
7915             unsigned long min = e->start;
7916             unsigned long max = e->end;
7917             int flags = page_get_flags(h2g(min));
7918             const char *path;
7919 
7920             max = h2g_valid(max - 1) ?
7921                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7922 
7923             if (page_check_range(h2g(min), max - min, flags) == -1) {
7924                 continue;
7925             }
7926 
7927             if (h2g(min) == ts->info->stack_limit) {
7928                 path = "[stack]";
7929             } else {
7930                 path = e->path;
7931             }
7932 
7933             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7934                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7935                             h2g(min), h2g(max - 1) + 1,
7936                             (flags & PAGE_READ) ? 'r' : '-',
7937                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7938                             (flags & PAGE_EXEC) ? 'x' : '-',
7939                             e->is_priv ? 'p' : '-',
7940                             (uint64_t) e->offset, e->dev, e->inode);
7941             if (path) {
7942                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7943             } else {
7944                 dprintf(fd, "\n");
7945             }
7946         }
7947     }
7948 
7949     free_self_maps(map_info);
7950 
7951 #ifdef TARGET_VSYSCALL_PAGE
7952     /*
7953      * We only support execution from the vsyscall page.
7954      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7955      */
7956     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7957                     " --xp 00000000 00:00 0",
7958                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7959     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7960 #endif
7961 
7962     return 0;
7963 }
7964 
7965 static int open_self_stat(void *cpu_env, int fd)
7966 {
7967     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7968     TaskState *ts = cpu->opaque;
7969     g_autoptr(GString) buf = g_string_new(NULL);
7970     int i;
7971 
7972     for (i = 0; i < 44; i++) {
7973         if (i == 0) {
7974             /* pid */
7975             g_string_printf(buf, FMT_pid " ", getpid());
7976         } else if (i == 1) {
7977             /* app name */
7978             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7979             bin = bin ? bin + 1 : ts->bprm->argv[0];
7980             g_string_printf(buf, "(%.15s) ", bin);
7981         } else if (i == 3) {
7982             /* ppid */
7983             g_string_printf(buf, FMT_pid " ", getppid());
7984         } else if (i == 27) {
7985             /* stack bottom */
7986             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7987         } else {
7988             /* for the rest, there is MasterCard */
7989             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7990         }
7991 
7992         if (write(fd, buf->str, buf->len) != buf->len) {
7993             return -1;
7994         }
7995     }
7996 
7997     return 0;
7998 }
7999 
8000 static int open_self_auxv(void *cpu_env, int fd)
8001 {
8002     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8003     TaskState *ts = cpu->opaque;
8004     abi_ulong auxv = ts->info->saved_auxv;
8005     abi_ulong len = ts->info->auxv_len;
8006     char *ptr;
8007 
8008     /*
8009      * Auxiliary vector is stored in target process stack.
8010      * read in whole auxv vector and copy it to file
8011      */
8012     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8013     if (ptr != NULL) {
8014         while (len > 0) {
8015             ssize_t r;
8016             r = write(fd, ptr, len);
8017             if (r <= 0) {
8018                 break;
8019             }
8020             len -= r;
8021             ptr += r;
8022         }
8023         lseek(fd, 0, SEEK_SET);
8024         unlock_user(ptr, auxv, len);
8025     }
8026 
8027     return 0;
8028 }
8029 
8030 static int is_proc_myself(const char *filename, const char *entry)
8031 {
8032     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8033         filename += strlen("/proc/");
8034         if (!strncmp(filename, "self/", strlen("self/"))) {
8035             filename += strlen("self/");
8036         } else if (*filename >= '1' && *filename <= '9') {
8037             char myself[80];
8038             snprintf(myself, sizeof(myself), "%d/", getpid());
8039             if (!strncmp(filename, myself, strlen(myself))) {
8040                 filename += strlen(myself);
8041             } else {
8042                 return 0;
8043             }
8044         } else {
8045             return 0;
8046         }
8047         if (!strcmp(filename, entry)) {
8048             return 1;
8049         }
8050     }
8051     return 0;
8052 }
8053 
8054 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8055     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8056 static int is_proc(const char *filename, const char *entry)
8057 {
8058     return strcmp(filename, entry) == 0;
8059 }
8060 #endif
8061 
8062 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8063 static int open_net_route(void *cpu_env, int fd)
8064 {
8065     FILE *fp;
8066     char *line = NULL;
8067     size_t len = 0;
8068     ssize_t read;
8069 
8070     fp = fopen("/proc/net/route", "r");
8071     if (fp == NULL) {
8072         return -1;
8073     }
8074 
8075     /* read header */
8076 
8077     read = getline(&line, &len, fp);
8078     dprintf(fd, "%s", line);
8079 
8080     /* read routes */
8081 
8082     while ((read = getline(&line, &len, fp)) != -1) {
8083         char iface[16];
8084         uint32_t dest, gw, mask;
8085         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8086         int fields;
8087 
8088         fields = sscanf(line,
8089                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8090                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8091                         &mask, &mtu, &window, &irtt);
8092         if (fields != 11) {
8093             continue;
8094         }
8095         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8096                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8097                 metric, tswap32(mask), mtu, window, irtt);
8098     }
8099 
8100     free(line);
8101     fclose(fp);
8102 
8103     return 0;
8104 }
8105 #endif
8106 
8107 #if defined(TARGET_SPARC)
8108 static int open_cpuinfo(void *cpu_env, int fd)
8109 {
8110     dprintf(fd, "type\t\t: sun4u\n");
8111     return 0;
8112 }
8113 #endif
8114 
8115 #if defined(TARGET_HPPA)
8116 static int open_cpuinfo(void *cpu_env, int fd)
8117 {
8118     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8119     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8120     dprintf(fd, "capabilities\t: os32\n");
8121     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8122     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8123     return 0;
8124 }
8125 #endif
8126 
8127 #if defined(TARGET_M68K)
8128 static int open_hardware(void *cpu_env, int fd)
8129 {
8130     dprintf(fd, "Model:\t\tqemu-m68k\n");
8131     return 0;
8132 }
8133 #endif
8134 
8135 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8136 {
8137     struct fake_open {
8138         const char *filename;
8139         int (*fill)(void *cpu_env, int fd);
8140         int (*cmp)(const char *s1, const char *s2);
8141     };
8142     const struct fake_open *fake_open;
8143     static const struct fake_open fakes[] = {
8144         { "maps", open_self_maps, is_proc_myself },
8145         { "stat", open_self_stat, is_proc_myself },
8146         { "auxv", open_self_auxv, is_proc_myself },
8147         { "cmdline", open_self_cmdline, is_proc_myself },
8148 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8149         { "/proc/net/route", open_net_route, is_proc },
8150 #endif
8151 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8152         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8153 #endif
8154 #if defined(TARGET_M68K)
8155         { "/proc/hardware", open_hardware, is_proc },
8156 #endif
8157         { NULL, NULL, NULL }
8158     };
8159 
8160     if (is_proc_myself(pathname, "exe")) {
8161         int execfd = qemu_getauxval(AT_EXECFD);
8162         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8163     }
8164 
8165     for (fake_open = fakes; fake_open->filename; fake_open++) {
8166         if (fake_open->cmp(pathname, fake_open->filename)) {
8167             break;
8168         }
8169     }
8170 
8171     if (fake_open->filename) {
8172         const char *tmpdir;
8173         char filename[PATH_MAX];
8174         int fd, r;
8175 
8176         /* create temporary file to map stat to */
8177         tmpdir = getenv("TMPDIR");
8178         if (!tmpdir)
8179             tmpdir = "/tmp";
8180         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8181         fd = mkstemp(filename);
8182         if (fd < 0) {
8183             return fd;
8184         }
8185         unlink(filename);
8186 
8187         if ((r = fake_open->fill(cpu_env, fd))) {
8188             int e = errno;
8189             close(fd);
8190             errno = e;
8191             return r;
8192         }
8193         lseek(fd, 0, SEEK_SET);
8194 
8195         return fd;
8196     }
8197 
8198     return safe_openat(dirfd, path(pathname), flags, mode);
8199 }
8200 
8201 #define TIMER_MAGIC 0x0caf0000
8202 #define TIMER_MAGIC_MASK 0xffff0000
8203 
8204 /* Convert QEMU provided timer ID back to internal 16bit index format */
8205 static target_timer_t get_timer_id(abi_long arg)
8206 {
8207     target_timer_t timerid = arg;
8208 
8209     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8210         return -TARGET_EINVAL;
8211     }
8212 
8213     timerid &= 0xffff;
8214 
8215     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8216         return -TARGET_EINVAL;
8217     }
8218 
8219     return timerid;
8220 }
8221 
8222 static int target_to_host_cpu_mask(unsigned long *host_mask,
8223                                    size_t host_size,
8224                                    abi_ulong target_addr,
8225                                    size_t target_size)
8226 {
8227     unsigned target_bits = sizeof(abi_ulong) * 8;
8228     unsigned host_bits = sizeof(*host_mask) * 8;
8229     abi_ulong *target_mask;
8230     unsigned i, j;
8231 
8232     assert(host_size >= target_size);
8233 
8234     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8235     if (!target_mask) {
8236         return -TARGET_EFAULT;
8237     }
8238     memset(host_mask, 0, host_size);
8239 
8240     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8241         unsigned bit = i * target_bits;
8242         abi_ulong val;
8243 
8244         __get_user(val, &target_mask[i]);
8245         for (j = 0; j < target_bits; j++, bit++) {
8246             if (val & (1UL << j)) {
8247                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8248             }
8249         }
8250     }
8251 
8252     unlock_user(target_mask, target_addr, 0);
8253     return 0;
8254 }
8255 
8256 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8257                                    size_t host_size,
8258                                    abi_ulong target_addr,
8259                                    size_t target_size)
8260 {
8261     unsigned target_bits = sizeof(abi_ulong) * 8;
8262     unsigned host_bits = sizeof(*host_mask) * 8;
8263     abi_ulong *target_mask;
8264     unsigned i, j;
8265 
8266     assert(host_size >= target_size);
8267 
8268     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8269     if (!target_mask) {
8270         return -TARGET_EFAULT;
8271     }
8272 
8273     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8274         unsigned bit = i * target_bits;
8275         abi_ulong val = 0;
8276 
8277         for (j = 0; j < target_bits; j++, bit++) {
8278             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8279                 val |= 1UL << j;
8280             }
8281         }
8282         __put_user(val, &target_mask[i]);
8283     }
8284 
8285     unlock_user(target_mask, target_addr, target_size);
8286     return 0;
8287 }
8288 
8289 #ifdef TARGET_NR_getdents
8290 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8291 {
8292     g_autofree void *hdirp = NULL;
8293     void *tdirp;
8294     int hlen, hoff, toff;
8295     int hreclen, treclen;
8296     off64_t prev_diroff = 0;
8297 
8298     hdirp = g_try_malloc(count);
8299     if (!hdirp) {
8300         return -TARGET_ENOMEM;
8301     }
8302 
8303 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8304     hlen = sys_getdents(dirfd, hdirp, count);
8305 #else
8306     hlen = sys_getdents64(dirfd, hdirp, count);
8307 #endif
8308 
8309     hlen = get_errno(hlen);
8310     if (is_error(hlen)) {
8311         return hlen;
8312     }
8313 
8314     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8315     if (!tdirp) {
8316         return -TARGET_EFAULT;
8317     }
8318 
8319     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8320 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8321         struct linux_dirent *hde = hdirp + hoff;
8322 #else
8323         struct linux_dirent64 *hde = hdirp + hoff;
8324 #endif
8325         struct target_dirent *tde = tdirp + toff;
8326         int namelen;
8327         uint8_t type;
8328 
8329         namelen = strlen(hde->d_name);
8330         hreclen = hde->d_reclen;
8331         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8332         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8333 
8334         if (toff + treclen > count) {
8335             /*
8336              * If the host struct is smaller than the target struct, or
8337              * requires less alignment and thus packs into less space,
8338              * then the host can return more entries than we can pass
8339              * on to the guest.
8340              */
8341             if (toff == 0) {
8342                 toff = -TARGET_EINVAL; /* result buffer is too small */
8343                 break;
8344             }
8345             /*
8346              * Return what we have, resetting the file pointer to the
8347              * location of the first record not returned.
8348              */
8349             lseek64(dirfd, prev_diroff, SEEK_SET);
8350             break;
8351         }
8352 
8353         prev_diroff = hde->d_off;
8354         tde->d_ino = tswapal(hde->d_ino);
8355         tde->d_off = tswapal(hde->d_off);
8356         tde->d_reclen = tswap16(treclen);
8357         memcpy(tde->d_name, hde->d_name, namelen + 1);
8358 
8359         /*
8360          * The getdents type is in what was formerly a padding byte at the
8361          * end of the structure.
8362          */
8363 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8364         type = *((uint8_t *)hde + hreclen - 1);
8365 #else
8366         type = hde->d_type;
8367 #endif
8368         *((uint8_t *)tde + treclen - 1) = type;
8369     }
8370 
8371     unlock_user(tdirp, arg2, toff);
8372     return toff;
8373 }
8374 #endif /* TARGET_NR_getdents */
8375 
8376 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8377 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8378 {
8379     g_autofree void *hdirp = NULL;
8380     void *tdirp;
8381     int hlen, hoff, toff;
8382     int hreclen, treclen;
8383     off64_t prev_diroff = 0;
8384 
8385     hdirp = g_try_malloc(count);
8386     if (!hdirp) {
8387         return -TARGET_ENOMEM;
8388     }
8389 
8390     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8391     if (is_error(hlen)) {
8392         return hlen;
8393     }
8394 
8395     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8396     if (!tdirp) {
8397         return -TARGET_EFAULT;
8398     }
8399 
8400     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8401         struct linux_dirent64 *hde = hdirp + hoff;
8402         struct target_dirent64 *tde = tdirp + toff;
8403         int namelen;
8404 
8405         namelen = strlen(hde->d_name) + 1;
8406         hreclen = hde->d_reclen;
8407         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8408         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8409 
8410         if (toff + treclen > count) {
8411             /*
8412              * If the host struct is smaller than the target struct, or
8413              * requires less alignment and thus packs into less space,
8414              * then the host can return more entries than we can pass
8415              * on to the guest.
8416              */
8417             if (toff == 0) {
8418                 toff = -TARGET_EINVAL; /* result buffer is too small */
8419                 break;
8420             }
8421             /*
8422              * Return what we have, resetting the file pointer to the
8423              * location of the first record not returned.
8424              */
8425             lseek64(dirfd, prev_diroff, SEEK_SET);
8426             break;
8427         }
8428 
8429         prev_diroff = hde->d_off;
8430         tde->d_ino = tswap64(hde->d_ino);
8431         tde->d_off = tswap64(hde->d_off);
8432         tde->d_reclen = tswap16(treclen);
8433         tde->d_type = hde->d_type;
8434         memcpy(tde->d_name, hde->d_name, namelen);
8435     }
8436 
8437     unlock_user(tdirp, arg2, toff);
8438     return toff;
8439 }
8440 #endif /* TARGET_NR_getdents64 */
8441 
8442 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8443 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8444 #endif
8445 
8446 /* This is an internal helper for do_syscall so that it is easier
8447  * to have a single return point, so that actions, such as logging
8448  * of syscall results, can be performed.
8449  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8450  */
8451 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8452                             abi_long arg2, abi_long arg3, abi_long arg4,
8453                             abi_long arg5, abi_long arg6, abi_long arg7,
8454                             abi_long arg8)
8455 {
8456     CPUState *cpu = env_cpu(cpu_env);
8457     abi_long ret;
8458 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8459     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8460     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8461     || defined(TARGET_NR_statx)
8462     struct stat st;
8463 #endif
8464 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8465     || defined(TARGET_NR_fstatfs)
8466     struct statfs stfs;
8467 #endif
8468     void *p;
8469 
8470     switch(num) {
8471     case TARGET_NR_exit:
8472         /* In old applications this may be used to implement _exit(2).
8473            However in threaded applications it is used for thread termination,
8474            and _exit_group is used for application termination.
8475            Do thread termination if we have more then one thread.  */
8476 
8477         if (block_signals()) {
8478             return -QEMU_ERESTARTSYS;
8479         }
8480 
8481         pthread_mutex_lock(&clone_lock);
8482 
8483         if (CPU_NEXT(first_cpu)) {
8484             TaskState *ts = cpu->opaque;
8485 
8486             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8487             object_unref(OBJECT(cpu));
8488             /*
8489              * At this point the CPU should be unrealized and removed
8490              * from cpu lists. We can clean-up the rest of the thread
8491              * data without the lock held.
8492              */
8493 
8494             pthread_mutex_unlock(&clone_lock);
8495 
8496             if (ts->child_tidptr) {
8497                 put_user_u32(0, ts->child_tidptr);
8498                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8499                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8500             }
8501             thread_cpu = NULL;
8502             g_free(ts);
8503             rcu_unregister_thread();
8504             pthread_exit(NULL);
8505         }
8506 
8507         pthread_mutex_unlock(&clone_lock);
8508         preexit_cleanup(cpu_env, arg1);
8509         _exit(arg1);
8510         return 0; /* avoid warning */
8511     case TARGET_NR_read:
8512         if (arg2 == 0 && arg3 == 0) {
8513             return get_errno(safe_read(arg1, 0, 0));
8514         } else {
8515             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8516                 return -TARGET_EFAULT;
8517             ret = get_errno(safe_read(arg1, p, arg3));
8518             if (ret >= 0 &&
8519                 fd_trans_host_to_target_data(arg1)) {
8520                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8521             }
8522             unlock_user(p, arg2, ret);
8523         }
8524         return ret;
8525     case TARGET_NR_write:
8526         if (arg2 == 0 && arg3 == 0) {
8527             return get_errno(safe_write(arg1, 0, 0));
8528         }
8529         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8530             return -TARGET_EFAULT;
8531         if (fd_trans_target_to_host_data(arg1)) {
8532             void *copy = g_malloc(arg3);
8533             memcpy(copy, p, arg3);
8534             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8535             if (ret >= 0) {
8536                 ret = get_errno(safe_write(arg1, copy, ret));
8537             }
8538             g_free(copy);
8539         } else {
8540             ret = get_errno(safe_write(arg1, p, arg3));
8541         }
8542         unlock_user(p, arg2, 0);
8543         return ret;
8544 
8545 #ifdef TARGET_NR_open
8546     case TARGET_NR_open:
8547         if (!(p = lock_user_string(arg1)))
8548             return -TARGET_EFAULT;
8549         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8550                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8551                                   arg3));
8552         fd_trans_unregister(ret);
8553         unlock_user(p, arg1, 0);
8554         return ret;
8555 #endif
8556     case TARGET_NR_openat:
8557         if (!(p = lock_user_string(arg2)))
8558             return -TARGET_EFAULT;
8559         ret = get_errno(do_openat(cpu_env, arg1, p,
8560                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8561                                   arg4));
8562         fd_trans_unregister(ret);
8563         unlock_user(p, arg2, 0);
8564         return ret;
8565 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8566     case TARGET_NR_name_to_handle_at:
8567         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8568         return ret;
8569 #endif
8570 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8571     case TARGET_NR_open_by_handle_at:
8572         ret = do_open_by_handle_at(arg1, arg2, arg3);
8573         fd_trans_unregister(ret);
8574         return ret;
8575 #endif
8576     case TARGET_NR_close:
8577         fd_trans_unregister(arg1);
8578         return get_errno(close(arg1));
8579 
8580     case TARGET_NR_brk:
8581         return do_brk(arg1);
8582 #ifdef TARGET_NR_fork
8583     case TARGET_NR_fork:
8584         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8585 #endif
8586 #ifdef TARGET_NR_waitpid
8587     case TARGET_NR_waitpid:
8588         {
8589             int status;
8590             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8591             if (!is_error(ret) && arg2 && ret
8592                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8593                 return -TARGET_EFAULT;
8594         }
8595         return ret;
8596 #endif
8597 #ifdef TARGET_NR_waitid
8598     case TARGET_NR_waitid:
8599         {
8600             siginfo_t info;
8601             info.si_pid = 0;
8602             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8603             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8604                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8605                     return -TARGET_EFAULT;
8606                 host_to_target_siginfo(p, &info);
8607                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8608             }
8609         }
8610         return ret;
8611 #endif
8612 #ifdef TARGET_NR_creat /* not on alpha */
8613     case TARGET_NR_creat:
8614         if (!(p = lock_user_string(arg1)))
8615             return -TARGET_EFAULT;
8616         ret = get_errno(creat(p, arg2));
8617         fd_trans_unregister(ret);
8618         unlock_user(p, arg1, 0);
8619         return ret;
8620 #endif
8621 #ifdef TARGET_NR_link
8622     case TARGET_NR_link:
8623         {
8624             void * p2;
8625             p = lock_user_string(arg1);
8626             p2 = lock_user_string(arg2);
8627             if (!p || !p2)
8628                 ret = -TARGET_EFAULT;
8629             else
8630                 ret = get_errno(link(p, p2));
8631             unlock_user(p2, arg2, 0);
8632             unlock_user(p, arg1, 0);
8633         }
8634         return ret;
8635 #endif
8636 #if defined(TARGET_NR_linkat)
8637     case TARGET_NR_linkat:
8638         {
8639             void * p2 = NULL;
8640             if (!arg2 || !arg4)
8641                 return -TARGET_EFAULT;
8642             p  = lock_user_string(arg2);
8643             p2 = lock_user_string(arg4);
8644             if (!p || !p2)
8645                 ret = -TARGET_EFAULT;
8646             else
8647                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8648             unlock_user(p, arg2, 0);
8649             unlock_user(p2, arg4, 0);
8650         }
8651         return ret;
8652 #endif
8653 #ifdef TARGET_NR_unlink
8654     case TARGET_NR_unlink:
8655         if (!(p = lock_user_string(arg1)))
8656             return -TARGET_EFAULT;
8657         ret = get_errno(unlink(p));
8658         unlock_user(p, arg1, 0);
8659         return ret;
8660 #endif
8661 #if defined(TARGET_NR_unlinkat)
8662     case TARGET_NR_unlinkat:
8663         if (!(p = lock_user_string(arg2)))
8664             return -TARGET_EFAULT;
8665         ret = get_errno(unlinkat(arg1, p, arg3));
8666         unlock_user(p, arg2, 0);
8667         return ret;
8668 #endif
8669     case TARGET_NR_execve:
8670         {
8671             char **argp, **envp;
8672             int argc, envc;
8673             abi_ulong gp;
8674             abi_ulong guest_argp;
8675             abi_ulong guest_envp;
8676             abi_ulong addr;
8677             char **q;
8678 
8679             argc = 0;
8680             guest_argp = arg2;
8681             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8682                 if (get_user_ual(addr, gp))
8683                     return -TARGET_EFAULT;
8684                 if (!addr)
8685                     break;
8686                 argc++;
8687             }
8688             envc = 0;
8689             guest_envp = arg3;
8690             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8691                 if (get_user_ual(addr, gp))
8692                     return -TARGET_EFAULT;
8693                 if (!addr)
8694                     break;
8695                 envc++;
8696             }
8697 
8698             argp = g_new0(char *, argc + 1);
8699             envp = g_new0(char *, envc + 1);
8700 
8701             for (gp = guest_argp, q = argp; gp;
8702                   gp += sizeof(abi_ulong), q++) {
8703                 if (get_user_ual(addr, gp))
8704                     goto execve_efault;
8705                 if (!addr)
8706                     break;
8707                 if (!(*q = lock_user_string(addr)))
8708                     goto execve_efault;
8709             }
8710             *q = NULL;
8711 
8712             for (gp = guest_envp, q = envp; gp;
8713                   gp += sizeof(abi_ulong), q++) {
8714                 if (get_user_ual(addr, gp))
8715                     goto execve_efault;
8716                 if (!addr)
8717                     break;
8718                 if (!(*q = lock_user_string(addr)))
8719                     goto execve_efault;
8720             }
8721             *q = NULL;
8722 
8723             if (!(p = lock_user_string(arg1)))
8724                 goto execve_efault;
8725             /* Although execve() is not an interruptible syscall it is
8726              * a special case where we must use the safe_syscall wrapper:
8727              * if we allow a signal to happen before we make the host
8728              * syscall then we will 'lose' it, because at the point of
8729              * execve the process leaves QEMU's control. So we use the
8730              * safe syscall wrapper to ensure that we either take the
8731              * signal as a guest signal, or else it does not happen
8732              * before the execve completes and makes it the other
8733              * program's problem.
8734              */
8735             ret = get_errno(safe_execve(p, argp, envp));
8736             unlock_user(p, arg1, 0);
8737 
8738             goto execve_end;
8739 
8740         execve_efault:
8741             ret = -TARGET_EFAULT;
8742 
8743         execve_end:
8744             for (gp = guest_argp, q = argp; *q;
8745                   gp += sizeof(abi_ulong), q++) {
8746                 if (get_user_ual(addr, gp)
8747                     || !addr)
8748                     break;
8749                 unlock_user(*q, addr, 0);
8750             }
8751             for (gp = guest_envp, q = envp; *q;
8752                   gp += sizeof(abi_ulong), q++) {
8753                 if (get_user_ual(addr, gp)
8754                     || !addr)
8755                     break;
8756                 unlock_user(*q, addr, 0);
8757             }
8758 
8759             g_free(argp);
8760             g_free(envp);
8761         }
8762         return ret;
8763     case TARGET_NR_chdir:
8764         if (!(p = lock_user_string(arg1)))
8765             return -TARGET_EFAULT;
8766         ret = get_errno(chdir(p));
8767         unlock_user(p, arg1, 0);
8768         return ret;
8769 #ifdef TARGET_NR_time
8770     case TARGET_NR_time:
8771         {
8772             time_t host_time;
8773             ret = get_errno(time(&host_time));
8774             if (!is_error(ret)
8775                 && arg1
8776                 && put_user_sal(host_time, arg1))
8777                 return -TARGET_EFAULT;
8778         }
8779         return ret;
8780 #endif
8781 #ifdef TARGET_NR_mknod
8782     case TARGET_NR_mknod:
8783         if (!(p = lock_user_string(arg1)))
8784             return -TARGET_EFAULT;
8785         ret = get_errno(mknod(p, arg2, arg3));
8786         unlock_user(p, arg1, 0);
8787         return ret;
8788 #endif
8789 #if defined(TARGET_NR_mknodat)
8790     case TARGET_NR_mknodat:
8791         if (!(p = lock_user_string(arg2)))
8792             return -TARGET_EFAULT;
8793         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8794         unlock_user(p, arg2, 0);
8795         return ret;
8796 #endif
8797 #ifdef TARGET_NR_chmod
8798     case TARGET_NR_chmod:
8799         if (!(p = lock_user_string(arg1)))
8800             return -TARGET_EFAULT;
8801         ret = get_errno(chmod(p, arg2));
8802         unlock_user(p, arg1, 0);
8803         return ret;
8804 #endif
8805 #ifdef TARGET_NR_lseek
8806     case TARGET_NR_lseek:
8807         return get_errno(lseek(arg1, arg2, arg3));
8808 #endif
8809 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8810     /* Alpha specific */
8811     case TARGET_NR_getxpid:
8812         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8813         return get_errno(getpid());
8814 #endif
8815 #ifdef TARGET_NR_getpid
8816     case TARGET_NR_getpid:
8817         return get_errno(getpid());
8818 #endif
8819     case TARGET_NR_mount:
8820         {
8821             /* need to look at the data field */
8822             void *p2, *p3;
8823 
8824             if (arg1) {
8825                 p = lock_user_string(arg1);
8826                 if (!p) {
8827                     return -TARGET_EFAULT;
8828                 }
8829             } else {
8830                 p = NULL;
8831             }
8832 
8833             p2 = lock_user_string(arg2);
8834             if (!p2) {
8835                 if (arg1) {
8836                     unlock_user(p, arg1, 0);
8837                 }
8838                 return -TARGET_EFAULT;
8839             }
8840 
8841             if (arg3) {
8842                 p3 = lock_user_string(arg3);
8843                 if (!p3) {
8844                     if (arg1) {
8845                         unlock_user(p, arg1, 0);
8846                     }
8847                     unlock_user(p2, arg2, 0);
8848                     return -TARGET_EFAULT;
8849                 }
8850             } else {
8851                 p3 = NULL;
8852             }
8853 
8854             /* FIXME - arg5 should be locked, but it isn't clear how to
8855              * do that since it's not guaranteed to be a NULL-terminated
8856              * string.
8857              */
8858             if (!arg5) {
8859                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8860             } else {
8861                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8862             }
8863             ret = get_errno(ret);
8864 
8865             if (arg1) {
8866                 unlock_user(p, arg1, 0);
8867             }
8868             unlock_user(p2, arg2, 0);
8869             if (arg3) {
8870                 unlock_user(p3, arg3, 0);
8871             }
8872         }
8873         return ret;
8874 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8875 #if defined(TARGET_NR_umount)
8876     case TARGET_NR_umount:
8877 #endif
8878 #if defined(TARGET_NR_oldumount)
8879     case TARGET_NR_oldumount:
8880 #endif
8881         if (!(p = lock_user_string(arg1)))
8882             return -TARGET_EFAULT;
8883         ret = get_errno(umount(p));
8884         unlock_user(p, arg1, 0);
8885         return ret;
8886 #endif
8887 #ifdef TARGET_NR_stime /* not on alpha */
8888     case TARGET_NR_stime:
8889         {
8890             struct timespec ts;
8891             ts.tv_nsec = 0;
8892             if (get_user_sal(ts.tv_sec, arg1)) {
8893                 return -TARGET_EFAULT;
8894             }
8895             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8896         }
8897 #endif
8898 #ifdef TARGET_NR_alarm /* not on alpha */
8899     case TARGET_NR_alarm:
8900         return alarm(arg1);
8901 #endif
8902 #ifdef TARGET_NR_pause /* not on alpha */
8903     case TARGET_NR_pause:
8904         if (!block_signals()) {
8905             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8906         }
8907         return -TARGET_EINTR;
8908 #endif
8909 #ifdef TARGET_NR_utime
8910     case TARGET_NR_utime:
8911         {
8912             struct utimbuf tbuf, *host_tbuf;
8913             struct target_utimbuf *target_tbuf;
8914             if (arg2) {
8915                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8916                     return -TARGET_EFAULT;
8917                 tbuf.actime = tswapal(target_tbuf->actime);
8918                 tbuf.modtime = tswapal(target_tbuf->modtime);
8919                 unlock_user_struct(target_tbuf, arg2, 0);
8920                 host_tbuf = &tbuf;
8921             } else {
8922                 host_tbuf = NULL;
8923             }
8924             if (!(p = lock_user_string(arg1)))
8925                 return -TARGET_EFAULT;
8926             ret = get_errno(utime(p, host_tbuf));
8927             unlock_user(p, arg1, 0);
8928         }
8929         return ret;
8930 #endif
8931 #ifdef TARGET_NR_utimes
8932     case TARGET_NR_utimes:
8933         {
8934             struct timeval *tvp, tv[2];
8935             if (arg2) {
8936                 if (copy_from_user_timeval(&tv[0], arg2)
8937                     || copy_from_user_timeval(&tv[1],
8938                                               arg2 + sizeof(struct target_timeval)))
8939                     return -TARGET_EFAULT;
8940                 tvp = tv;
8941             } else {
8942                 tvp = NULL;
8943             }
8944             if (!(p = lock_user_string(arg1)))
8945                 return -TARGET_EFAULT;
8946             ret = get_errno(utimes(p, tvp));
8947             unlock_user(p, arg1, 0);
8948         }
8949         return ret;
8950 #endif
8951 #if defined(TARGET_NR_futimesat)
8952     case TARGET_NR_futimesat:
8953         {
8954             struct timeval *tvp, tv[2];
8955             if (arg3) {
8956                 if (copy_from_user_timeval(&tv[0], arg3)
8957                     || copy_from_user_timeval(&tv[1],
8958                                               arg3 + sizeof(struct target_timeval)))
8959                     return -TARGET_EFAULT;
8960                 tvp = tv;
8961             } else {
8962                 tvp = NULL;
8963             }
8964             if (!(p = lock_user_string(arg2))) {
8965                 return -TARGET_EFAULT;
8966             }
8967             ret = get_errno(futimesat(arg1, path(p), tvp));
8968             unlock_user(p, arg2, 0);
8969         }
8970         return ret;
8971 #endif
8972 #ifdef TARGET_NR_access
8973     case TARGET_NR_access:
8974         if (!(p = lock_user_string(arg1))) {
8975             return -TARGET_EFAULT;
8976         }
8977         ret = get_errno(access(path(p), arg2));
8978         unlock_user(p, arg1, 0);
8979         return ret;
8980 #endif
8981 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8982     case TARGET_NR_faccessat:
8983         if (!(p = lock_user_string(arg2))) {
8984             return -TARGET_EFAULT;
8985         }
8986         ret = get_errno(faccessat(arg1, p, arg3, 0));
8987         unlock_user(p, arg2, 0);
8988         return ret;
8989 #endif
8990 #ifdef TARGET_NR_nice /* not on alpha */
8991     case TARGET_NR_nice:
8992         return get_errno(nice(arg1));
8993 #endif
8994     case TARGET_NR_sync:
8995         sync();
8996         return 0;
8997 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8998     case TARGET_NR_syncfs:
8999         return get_errno(syncfs(arg1));
9000 #endif
9001     case TARGET_NR_kill:
9002         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9003 #ifdef TARGET_NR_rename
9004     case TARGET_NR_rename:
9005         {
9006             void *p2;
9007             p = lock_user_string(arg1);
9008             p2 = lock_user_string(arg2);
9009             if (!p || !p2)
9010                 ret = -TARGET_EFAULT;
9011             else
9012                 ret = get_errno(rename(p, p2));
9013             unlock_user(p2, arg2, 0);
9014             unlock_user(p, arg1, 0);
9015         }
9016         return ret;
9017 #endif
9018 #if defined(TARGET_NR_renameat)
9019     case TARGET_NR_renameat:
9020         {
9021             void *p2;
9022             p  = lock_user_string(arg2);
9023             p2 = lock_user_string(arg4);
9024             if (!p || !p2)
9025                 ret = -TARGET_EFAULT;
9026             else
9027                 ret = get_errno(renameat(arg1, p, arg3, p2));
9028             unlock_user(p2, arg4, 0);
9029             unlock_user(p, arg2, 0);
9030         }
9031         return ret;
9032 #endif
9033 #if defined(TARGET_NR_renameat2)
9034     case TARGET_NR_renameat2:
9035         {
9036             void *p2;
9037             p  = lock_user_string(arg2);
9038             p2 = lock_user_string(arg4);
9039             if (!p || !p2) {
9040                 ret = -TARGET_EFAULT;
9041             } else {
9042                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9043             }
9044             unlock_user(p2, arg4, 0);
9045             unlock_user(p, arg2, 0);
9046         }
9047         return ret;
9048 #endif
9049 #ifdef TARGET_NR_mkdir
9050     case TARGET_NR_mkdir:
9051         if (!(p = lock_user_string(arg1)))
9052             return -TARGET_EFAULT;
9053         ret = get_errno(mkdir(p, arg2));
9054         unlock_user(p, arg1, 0);
9055         return ret;
9056 #endif
9057 #if defined(TARGET_NR_mkdirat)
9058     case TARGET_NR_mkdirat:
9059         if (!(p = lock_user_string(arg2)))
9060             return -TARGET_EFAULT;
9061         ret = get_errno(mkdirat(arg1, p, arg3));
9062         unlock_user(p, arg2, 0);
9063         return ret;
9064 #endif
9065 #ifdef TARGET_NR_rmdir
9066     case TARGET_NR_rmdir:
9067         if (!(p = lock_user_string(arg1)))
9068             return -TARGET_EFAULT;
9069         ret = get_errno(rmdir(p));
9070         unlock_user(p, arg1, 0);
9071         return ret;
9072 #endif
9073     case TARGET_NR_dup:
9074         ret = get_errno(dup(arg1));
9075         if (ret >= 0) {
9076             fd_trans_dup(arg1, ret);
9077         }
9078         return ret;
9079 #ifdef TARGET_NR_pipe
9080     case TARGET_NR_pipe:
9081         return do_pipe(cpu_env, arg1, 0, 0);
9082 #endif
9083 #ifdef TARGET_NR_pipe2
9084     case TARGET_NR_pipe2:
9085         return do_pipe(cpu_env, arg1,
9086                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9087 #endif
9088     case TARGET_NR_times:
9089         {
9090             struct target_tms *tmsp;
9091             struct tms tms;
9092             ret = get_errno(times(&tms));
9093             if (arg1) {
9094                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9095                 if (!tmsp)
9096                     return -TARGET_EFAULT;
9097                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9098                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9099                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9100                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9101             }
9102             if (!is_error(ret))
9103                 ret = host_to_target_clock_t(ret);
9104         }
9105         return ret;
9106     case TARGET_NR_acct:
9107         if (arg1 == 0) {
9108             ret = get_errno(acct(NULL));
9109         } else {
9110             if (!(p = lock_user_string(arg1))) {
9111                 return -TARGET_EFAULT;
9112             }
9113             ret = get_errno(acct(path(p)));
9114             unlock_user(p, arg1, 0);
9115         }
9116         return ret;
9117 #ifdef TARGET_NR_umount2
9118     case TARGET_NR_umount2:
9119         if (!(p = lock_user_string(arg1)))
9120             return -TARGET_EFAULT;
9121         ret = get_errno(umount2(p, arg2));
9122         unlock_user(p, arg1, 0);
9123         return ret;
9124 #endif
9125     case TARGET_NR_ioctl:
9126         return do_ioctl(arg1, arg2, arg3);
9127 #ifdef TARGET_NR_fcntl
9128     case TARGET_NR_fcntl:
9129         return do_fcntl(arg1, arg2, arg3);
9130 #endif
9131     case TARGET_NR_setpgid:
9132         return get_errno(setpgid(arg1, arg2));
9133     case TARGET_NR_umask:
9134         return get_errno(umask(arg1));
9135     case TARGET_NR_chroot:
9136         if (!(p = lock_user_string(arg1)))
9137             return -TARGET_EFAULT;
9138         ret = get_errno(chroot(p));
9139         unlock_user(p, arg1, 0);
9140         return ret;
9141 #ifdef TARGET_NR_dup2
9142     case TARGET_NR_dup2:
9143         ret = get_errno(dup2(arg1, arg2));
9144         if (ret >= 0) {
9145             fd_trans_dup(arg1, arg2);
9146         }
9147         return ret;
9148 #endif
9149 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9150     case TARGET_NR_dup3:
9151     {
9152         int host_flags;
9153 
9154         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9155             return -EINVAL;
9156         }
9157         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9158         ret = get_errno(dup3(arg1, arg2, host_flags));
9159         if (ret >= 0) {
9160             fd_trans_dup(arg1, arg2);
9161         }
9162         return ret;
9163     }
9164 #endif
9165 #ifdef TARGET_NR_getppid /* not on alpha */
9166     case TARGET_NR_getppid:
9167         return get_errno(getppid());
9168 #endif
9169 #ifdef TARGET_NR_getpgrp
9170     case TARGET_NR_getpgrp:
9171         return get_errno(getpgrp());
9172 #endif
9173     case TARGET_NR_setsid:
9174         return get_errno(setsid());
9175 #ifdef TARGET_NR_sigaction
9176     case TARGET_NR_sigaction:
9177         {
9178 #if defined(TARGET_MIPS)
9179 	    struct target_sigaction act, oact, *pact, *old_act;
9180 
9181 	    if (arg2) {
9182                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9183                     return -TARGET_EFAULT;
9184 		act._sa_handler = old_act->_sa_handler;
9185 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9186 		act.sa_flags = old_act->sa_flags;
9187 		unlock_user_struct(old_act, arg2, 0);
9188 		pact = &act;
9189 	    } else {
9190 		pact = NULL;
9191 	    }
9192 
9193         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9194 
9195 	    if (!is_error(ret) && arg3) {
9196                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9197                     return -TARGET_EFAULT;
9198 		old_act->_sa_handler = oact._sa_handler;
9199 		old_act->sa_flags = oact.sa_flags;
9200 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9201 		old_act->sa_mask.sig[1] = 0;
9202 		old_act->sa_mask.sig[2] = 0;
9203 		old_act->sa_mask.sig[3] = 0;
9204 		unlock_user_struct(old_act, arg3, 1);
9205 	    }
9206 #else
9207             struct target_old_sigaction *old_act;
9208             struct target_sigaction act, oact, *pact;
9209             if (arg2) {
9210                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9211                     return -TARGET_EFAULT;
9212                 act._sa_handler = old_act->_sa_handler;
9213                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9214                 act.sa_flags = old_act->sa_flags;
9215 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9216                 act.sa_restorer = old_act->sa_restorer;
9217 #endif
9218                 unlock_user_struct(old_act, arg2, 0);
9219                 pact = &act;
9220             } else {
9221                 pact = NULL;
9222             }
9223             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9224             if (!is_error(ret) && arg3) {
9225                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9226                     return -TARGET_EFAULT;
9227                 old_act->_sa_handler = oact._sa_handler;
9228                 old_act->sa_mask = oact.sa_mask.sig[0];
9229                 old_act->sa_flags = oact.sa_flags;
9230 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9231                 old_act->sa_restorer = oact.sa_restorer;
9232 #endif
9233                 unlock_user_struct(old_act, arg3, 1);
9234             }
9235 #endif
9236         }
9237         return ret;
9238 #endif
9239     case TARGET_NR_rt_sigaction:
9240         {
9241             /*
9242              * For Alpha and SPARC this is a 5 argument syscall, with
9243              * a 'restorer' parameter which must be copied into the
9244              * sa_restorer field of the sigaction struct.
9245              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9246              * and arg5 is the sigsetsize.
9247              */
9248 #if defined(TARGET_ALPHA)
9249             target_ulong sigsetsize = arg4;
9250             target_ulong restorer = arg5;
9251 #elif defined(TARGET_SPARC)
9252             target_ulong restorer = arg4;
9253             target_ulong sigsetsize = arg5;
9254 #else
9255             target_ulong sigsetsize = arg4;
9256             target_ulong restorer = 0;
9257 #endif
9258             struct target_sigaction *act = NULL;
9259             struct target_sigaction *oact = NULL;
9260 
9261             if (sigsetsize != sizeof(target_sigset_t)) {
9262                 return -TARGET_EINVAL;
9263             }
9264             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9265                 return -TARGET_EFAULT;
9266             }
9267             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9268                 ret = -TARGET_EFAULT;
9269             } else {
9270                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9271                 if (oact) {
9272                     unlock_user_struct(oact, arg3, 1);
9273                 }
9274             }
9275             if (act) {
9276                 unlock_user_struct(act, arg2, 0);
9277             }
9278         }
9279         return ret;
9280 #ifdef TARGET_NR_sgetmask /* not on alpha */
9281     case TARGET_NR_sgetmask:
9282         {
9283             sigset_t cur_set;
9284             abi_ulong target_set;
9285             ret = do_sigprocmask(0, NULL, &cur_set);
9286             if (!ret) {
9287                 host_to_target_old_sigset(&target_set, &cur_set);
9288                 ret = target_set;
9289             }
9290         }
9291         return ret;
9292 #endif
9293 #ifdef TARGET_NR_ssetmask /* not on alpha */
9294     case TARGET_NR_ssetmask:
9295         {
9296             sigset_t set, oset;
9297             abi_ulong target_set = arg1;
9298             target_to_host_old_sigset(&set, &target_set);
9299             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9300             if (!ret) {
9301                 host_to_target_old_sigset(&target_set, &oset);
9302                 ret = target_set;
9303             }
9304         }
9305         return ret;
9306 #endif
9307 #ifdef TARGET_NR_sigprocmask
9308     case TARGET_NR_sigprocmask:
9309         {
9310 #if defined(TARGET_ALPHA)
9311             sigset_t set, oldset;
9312             abi_ulong mask;
9313             int how;
9314 
9315             switch (arg1) {
9316             case TARGET_SIG_BLOCK:
9317                 how = SIG_BLOCK;
9318                 break;
9319             case TARGET_SIG_UNBLOCK:
9320                 how = SIG_UNBLOCK;
9321                 break;
9322             case TARGET_SIG_SETMASK:
9323                 how = SIG_SETMASK;
9324                 break;
9325             default:
9326                 return -TARGET_EINVAL;
9327             }
9328             mask = arg2;
9329             target_to_host_old_sigset(&set, &mask);
9330 
9331             ret = do_sigprocmask(how, &set, &oldset);
9332             if (!is_error(ret)) {
9333                 host_to_target_old_sigset(&mask, &oldset);
9334                 ret = mask;
9335                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9336             }
9337 #else
9338             sigset_t set, oldset, *set_ptr;
9339             int how;
9340 
9341             if (arg2) {
9342                 switch (arg1) {
9343                 case TARGET_SIG_BLOCK:
9344                     how = SIG_BLOCK;
9345                     break;
9346                 case TARGET_SIG_UNBLOCK:
9347                     how = SIG_UNBLOCK;
9348                     break;
9349                 case TARGET_SIG_SETMASK:
9350                     how = SIG_SETMASK;
9351                     break;
9352                 default:
9353                     return -TARGET_EINVAL;
9354                 }
9355                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9356                     return -TARGET_EFAULT;
9357                 target_to_host_old_sigset(&set, p);
9358                 unlock_user(p, arg2, 0);
9359                 set_ptr = &set;
9360             } else {
9361                 how = 0;
9362                 set_ptr = NULL;
9363             }
9364             ret = do_sigprocmask(how, set_ptr, &oldset);
9365             if (!is_error(ret) && arg3) {
9366                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9367                     return -TARGET_EFAULT;
9368                 host_to_target_old_sigset(p, &oldset);
9369                 unlock_user(p, arg3, sizeof(target_sigset_t));
9370             }
9371 #endif
9372         }
9373         return ret;
9374 #endif
9375     case TARGET_NR_rt_sigprocmask:
9376         {
9377             int how = arg1;
9378             sigset_t set, oldset, *set_ptr;
9379 
9380             if (arg4 != sizeof(target_sigset_t)) {
9381                 return -TARGET_EINVAL;
9382             }
9383 
9384             if (arg2) {
9385                 switch(how) {
9386                 case TARGET_SIG_BLOCK:
9387                     how = SIG_BLOCK;
9388                     break;
9389                 case TARGET_SIG_UNBLOCK:
9390                     how = SIG_UNBLOCK;
9391                     break;
9392                 case TARGET_SIG_SETMASK:
9393                     how = SIG_SETMASK;
9394                     break;
9395                 default:
9396                     return -TARGET_EINVAL;
9397                 }
9398                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9399                     return -TARGET_EFAULT;
9400                 target_to_host_sigset(&set, p);
9401                 unlock_user(p, arg2, 0);
9402                 set_ptr = &set;
9403             } else {
9404                 how = 0;
9405                 set_ptr = NULL;
9406             }
9407             ret = do_sigprocmask(how, set_ptr, &oldset);
9408             if (!is_error(ret) && arg3) {
9409                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9410                     return -TARGET_EFAULT;
9411                 host_to_target_sigset(p, &oldset);
9412                 unlock_user(p, arg3, sizeof(target_sigset_t));
9413             }
9414         }
9415         return ret;
9416 #ifdef TARGET_NR_sigpending
9417     case TARGET_NR_sigpending:
9418         {
9419             sigset_t set;
9420             ret = get_errno(sigpending(&set));
9421             if (!is_error(ret)) {
9422                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9423                     return -TARGET_EFAULT;
9424                 host_to_target_old_sigset(p, &set);
9425                 unlock_user(p, arg1, sizeof(target_sigset_t));
9426             }
9427         }
9428         return ret;
9429 #endif
9430     case TARGET_NR_rt_sigpending:
9431         {
9432             sigset_t set;
9433 
9434             /* Yes, this check is >, not != like most. We follow the kernel's
9435              * logic and it does it like this because it implements
9436              * NR_sigpending through the same code path, and in that case
9437              * the old_sigset_t is smaller in size.
9438              */
9439             if (arg2 > sizeof(target_sigset_t)) {
9440                 return -TARGET_EINVAL;
9441             }
9442 
9443             ret = get_errno(sigpending(&set));
9444             if (!is_error(ret)) {
9445                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9446                     return -TARGET_EFAULT;
9447                 host_to_target_sigset(p, &set);
9448                 unlock_user(p, arg1, sizeof(target_sigset_t));
9449             }
9450         }
9451         return ret;
9452 #ifdef TARGET_NR_sigsuspend
9453     case TARGET_NR_sigsuspend:
9454         {
9455             TaskState *ts = cpu->opaque;
9456 #if defined(TARGET_ALPHA)
9457             abi_ulong mask = arg1;
9458             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9459 #else
9460             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9461                 return -TARGET_EFAULT;
9462             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9463             unlock_user(p, arg1, 0);
9464 #endif
9465             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9466                                                SIGSET_T_SIZE));
9467             if (ret != -QEMU_ERESTARTSYS) {
9468                 ts->in_sigsuspend = 1;
9469             }
9470         }
9471         return ret;
9472 #endif
9473     case TARGET_NR_rt_sigsuspend:
9474         {
9475             TaskState *ts = cpu->opaque;
9476 
9477             if (arg2 != sizeof(target_sigset_t)) {
9478                 return -TARGET_EINVAL;
9479             }
9480             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9481                 return -TARGET_EFAULT;
9482             target_to_host_sigset(&ts->sigsuspend_mask, p);
9483             unlock_user(p, arg1, 0);
9484             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9485                                                SIGSET_T_SIZE));
9486             if (ret != -QEMU_ERESTARTSYS) {
9487                 ts->in_sigsuspend = 1;
9488             }
9489         }
9490         return ret;
9491 #ifdef TARGET_NR_rt_sigtimedwait
9492     case TARGET_NR_rt_sigtimedwait:
9493         {
9494             sigset_t set;
9495             struct timespec uts, *puts;
9496             siginfo_t uinfo;
9497 
9498             if (arg4 != sizeof(target_sigset_t)) {
9499                 return -TARGET_EINVAL;
9500             }
9501 
9502             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9503                 return -TARGET_EFAULT;
9504             target_to_host_sigset(&set, p);
9505             unlock_user(p, arg1, 0);
9506             if (arg3) {
9507                 puts = &uts;
9508                 if (target_to_host_timespec(puts, arg3)) {
9509                     return -TARGET_EFAULT;
9510                 }
9511             } else {
9512                 puts = NULL;
9513             }
9514             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9515                                                  SIGSET_T_SIZE));
9516             if (!is_error(ret)) {
9517                 if (arg2) {
9518                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9519                                   0);
9520                     if (!p) {
9521                         return -TARGET_EFAULT;
9522                     }
9523                     host_to_target_siginfo(p, &uinfo);
9524                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9525                 }
9526                 ret = host_to_target_signal(ret);
9527             }
9528         }
9529         return ret;
9530 #endif
9531 #ifdef TARGET_NR_rt_sigtimedwait_time64
9532     case TARGET_NR_rt_sigtimedwait_time64:
9533         {
9534             sigset_t set;
9535             struct timespec uts, *puts;
9536             siginfo_t uinfo;
9537 
9538             if (arg4 != sizeof(target_sigset_t)) {
9539                 return -TARGET_EINVAL;
9540             }
9541 
9542             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9543             if (!p) {
9544                 return -TARGET_EFAULT;
9545             }
9546             target_to_host_sigset(&set, p);
9547             unlock_user(p, arg1, 0);
9548             if (arg3) {
9549                 puts = &uts;
9550                 if (target_to_host_timespec64(puts, arg3)) {
9551                     return -TARGET_EFAULT;
9552                 }
9553             } else {
9554                 puts = NULL;
9555             }
9556             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9557                                                  SIGSET_T_SIZE));
9558             if (!is_error(ret)) {
9559                 if (arg2) {
9560                     p = lock_user(VERIFY_WRITE, arg2,
9561                                   sizeof(target_siginfo_t), 0);
9562                     if (!p) {
9563                         return -TARGET_EFAULT;
9564                     }
9565                     host_to_target_siginfo(p, &uinfo);
9566                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9567                 }
9568                 ret = host_to_target_signal(ret);
9569             }
9570         }
9571         return ret;
9572 #endif
9573     case TARGET_NR_rt_sigqueueinfo:
9574         {
9575             siginfo_t uinfo;
9576 
9577             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9578             if (!p) {
9579                 return -TARGET_EFAULT;
9580             }
9581             target_to_host_siginfo(&uinfo, p);
9582             unlock_user(p, arg3, 0);
9583             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9584         }
9585         return ret;
9586     case TARGET_NR_rt_tgsigqueueinfo:
9587         {
9588             siginfo_t uinfo;
9589 
9590             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9591             if (!p) {
9592                 return -TARGET_EFAULT;
9593             }
9594             target_to_host_siginfo(&uinfo, p);
9595             unlock_user(p, arg4, 0);
9596             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9597         }
9598         return ret;
9599 #ifdef TARGET_NR_sigreturn
9600     case TARGET_NR_sigreturn:
9601         if (block_signals()) {
9602             return -QEMU_ERESTARTSYS;
9603         }
9604         return do_sigreturn(cpu_env);
9605 #endif
9606     case TARGET_NR_rt_sigreturn:
9607         if (block_signals()) {
9608             return -QEMU_ERESTARTSYS;
9609         }
9610         return do_rt_sigreturn(cpu_env);
9611     case TARGET_NR_sethostname:
9612         if (!(p = lock_user_string(arg1)))
9613             return -TARGET_EFAULT;
9614         ret = get_errno(sethostname(p, arg2));
9615         unlock_user(p, arg1, 0);
9616         return ret;
9617 #ifdef TARGET_NR_setrlimit
9618     case TARGET_NR_setrlimit:
9619         {
9620             int resource = target_to_host_resource(arg1);
9621             struct target_rlimit *target_rlim;
9622             struct rlimit rlim;
9623             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9624                 return -TARGET_EFAULT;
9625             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9626             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9627             unlock_user_struct(target_rlim, arg2, 0);
9628             /*
9629              * If we just passed through resource limit settings for memory then
9630              * they would also apply to QEMU's own allocations, and QEMU will
9631              * crash or hang or die if its allocations fail. Ideally we would
9632              * track the guest allocations in QEMU and apply the limits ourselves.
9633              * For now, just tell the guest the call succeeded but don't actually
9634              * limit anything.
9635              */
9636             if (resource != RLIMIT_AS &&
9637                 resource != RLIMIT_DATA &&
9638                 resource != RLIMIT_STACK) {
9639                 return get_errno(setrlimit(resource, &rlim));
9640             } else {
9641                 return 0;
9642             }
9643         }
9644 #endif
9645 #ifdef TARGET_NR_getrlimit
9646     case TARGET_NR_getrlimit:
9647         {
9648             int resource = target_to_host_resource(arg1);
9649             struct target_rlimit *target_rlim;
9650             struct rlimit rlim;
9651 
9652             ret = get_errno(getrlimit(resource, &rlim));
9653             if (!is_error(ret)) {
9654                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9655                     return -TARGET_EFAULT;
9656                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9657                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9658                 unlock_user_struct(target_rlim, arg2, 1);
9659             }
9660         }
9661         return ret;
9662 #endif
9663     case TARGET_NR_getrusage:
9664         {
9665             struct rusage rusage;
9666             ret = get_errno(getrusage(arg1, &rusage));
9667             if (!is_error(ret)) {
9668                 ret = host_to_target_rusage(arg2, &rusage);
9669             }
9670         }
9671         return ret;
9672 #if defined(TARGET_NR_gettimeofday)
9673     case TARGET_NR_gettimeofday:
9674         {
9675             struct timeval tv;
9676             struct timezone tz;
9677 
9678             ret = get_errno(gettimeofday(&tv, &tz));
9679             if (!is_error(ret)) {
9680                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9681                     return -TARGET_EFAULT;
9682                 }
9683                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9684                     return -TARGET_EFAULT;
9685                 }
9686             }
9687         }
9688         return ret;
9689 #endif
9690 #if defined(TARGET_NR_settimeofday)
9691     case TARGET_NR_settimeofday:
9692         {
9693             struct timeval tv, *ptv = NULL;
9694             struct timezone tz, *ptz = NULL;
9695 
9696             if (arg1) {
9697                 if (copy_from_user_timeval(&tv, arg1)) {
9698                     return -TARGET_EFAULT;
9699                 }
9700                 ptv = &tv;
9701             }
9702 
9703             if (arg2) {
9704                 if (copy_from_user_timezone(&tz, arg2)) {
9705                     return -TARGET_EFAULT;
9706                 }
9707                 ptz = &tz;
9708             }
9709 
9710             return get_errno(settimeofday(ptv, ptz));
9711         }
9712 #endif
9713 #if defined(TARGET_NR_select)
9714     case TARGET_NR_select:
9715 #if defined(TARGET_WANT_NI_OLD_SELECT)
9716         /* some architectures used to have old_select here
9717          * but now ENOSYS it.
9718          */
9719         ret = -TARGET_ENOSYS;
9720 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9721         ret = do_old_select(arg1);
9722 #else
9723         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9724 #endif
9725         return ret;
9726 #endif
9727 #ifdef TARGET_NR_pselect6
9728     case TARGET_NR_pselect6:
9729         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9730 #endif
9731 #ifdef TARGET_NR_pselect6_time64
9732     case TARGET_NR_pselect6_time64:
9733         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9734 #endif
9735 #ifdef TARGET_NR_symlink
9736     case TARGET_NR_symlink:
9737         {
9738             void *p2;
9739             p = lock_user_string(arg1);
9740             p2 = lock_user_string(arg2);
9741             if (!p || !p2)
9742                 ret = -TARGET_EFAULT;
9743             else
9744                 ret = get_errno(symlink(p, p2));
9745             unlock_user(p2, arg2, 0);
9746             unlock_user(p, arg1, 0);
9747         }
9748         return ret;
9749 #endif
9750 #if defined(TARGET_NR_symlinkat)
9751     case TARGET_NR_symlinkat:
9752         {
9753             void *p2;
9754             p  = lock_user_string(arg1);
9755             p2 = lock_user_string(arg3);
9756             if (!p || !p2)
9757                 ret = -TARGET_EFAULT;
9758             else
9759                 ret = get_errno(symlinkat(p, arg2, p2));
9760             unlock_user(p2, arg3, 0);
9761             unlock_user(p, arg1, 0);
9762         }
9763         return ret;
9764 #endif
9765 #ifdef TARGET_NR_readlink
9766     case TARGET_NR_readlink:
9767         {
9768             void *p2;
9769             p = lock_user_string(arg1);
9770             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9771             if (!p || !p2) {
9772                 ret = -TARGET_EFAULT;
9773             } else if (!arg3) {
9774                 /* Short circuit this for the magic exe check. */
9775                 ret = -TARGET_EINVAL;
9776             } else if (is_proc_myself((const char *)p, "exe")) {
9777                 char real[PATH_MAX], *temp;
9778                 temp = realpath(exec_path, real);
9779                 /* Return value is # of bytes that we wrote to the buffer. */
9780                 if (temp == NULL) {
9781                     ret = get_errno(-1);
9782                 } else {
9783                     /* Don't worry about sign mismatch as earlier mapping
9784                      * logic would have thrown a bad address error. */
9785                     ret = MIN(strlen(real), arg3);
9786                     /* We cannot NUL terminate the string. */
9787                     memcpy(p2, real, ret);
9788                 }
9789             } else {
9790                 ret = get_errno(readlink(path(p), p2, arg3));
9791             }
9792             unlock_user(p2, arg2, ret);
9793             unlock_user(p, arg1, 0);
9794         }
9795         return ret;
9796 #endif
9797 #if defined(TARGET_NR_readlinkat)
9798     case TARGET_NR_readlinkat:
9799         {
9800             void *p2;
9801             p  = lock_user_string(arg2);
9802             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9803             if (!p || !p2) {
9804                 ret = -TARGET_EFAULT;
9805             } else if (is_proc_myself((const char *)p, "exe")) {
9806                 char real[PATH_MAX], *temp;
9807                 temp = realpath(exec_path, real);
9808                 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9809                 snprintf((char *)p2, arg4, "%s", real);
9810             } else {
9811                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9812             }
9813             unlock_user(p2, arg3, ret);
9814             unlock_user(p, arg2, 0);
9815         }
9816         return ret;
9817 #endif
9818 #ifdef TARGET_NR_swapon
9819     case TARGET_NR_swapon:
9820         if (!(p = lock_user_string(arg1)))
9821             return -TARGET_EFAULT;
9822         ret = get_errno(swapon(p, arg2));
9823         unlock_user(p, arg1, 0);
9824         return ret;
9825 #endif
9826     case TARGET_NR_reboot:
9827         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9828            /* arg4 must be ignored in all other cases */
9829            p = lock_user_string(arg4);
9830            if (!p) {
9831                return -TARGET_EFAULT;
9832            }
9833            ret = get_errno(reboot(arg1, arg2, arg3, p));
9834            unlock_user(p, arg4, 0);
9835         } else {
9836            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9837         }
9838         return ret;
9839 #ifdef TARGET_NR_mmap
9840     case TARGET_NR_mmap:
9841 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9842     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9843     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9844     || defined(TARGET_S390X)
9845         {
9846             abi_ulong *v;
9847             abi_ulong v1, v2, v3, v4, v5, v6;
9848             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9849                 return -TARGET_EFAULT;
9850             v1 = tswapal(v[0]);
9851             v2 = tswapal(v[1]);
9852             v3 = tswapal(v[2]);
9853             v4 = tswapal(v[3]);
9854             v5 = tswapal(v[4]);
9855             v6 = tswapal(v[5]);
9856             unlock_user(v, arg1, 0);
9857             ret = get_errno(target_mmap(v1, v2, v3,
9858                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9859                                         v5, v6));
9860         }
9861 #else
9862         /* mmap pointers are always untagged */
9863         ret = get_errno(target_mmap(arg1, arg2, arg3,
9864                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9865                                     arg5,
9866                                     arg6));
9867 #endif
9868         return ret;
9869 #endif
9870 #ifdef TARGET_NR_mmap2
9871     case TARGET_NR_mmap2:
9872 #ifndef MMAP_SHIFT
9873 #define MMAP_SHIFT 12
9874 #endif
9875         ret = target_mmap(arg1, arg2, arg3,
9876                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9877                           arg5, arg6 << MMAP_SHIFT);
9878         return get_errno(ret);
9879 #endif
9880     case TARGET_NR_munmap:
9881         arg1 = cpu_untagged_addr(cpu, arg1);
9882         return get_errno(target_munmap(arg1, arg2));
9883     case TARGET_NR_mprotect:
9884         arg1 = cpu_untagged_addr(cpu, arg1);
9885         {
9886             TaskState *ts = cpu->opaque;
9887             /* Special hack to detect libc making the stack executable.  */
9888             if ((arg3 & PROT_GROWSDOWN)
9889                 && arg1 >= ts->info->stack_limit
9890                 && arg1 <= ts->info->start_stack) {
9891                 arg3 &= ~PROT_GROWSDOWN;
9892                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9893                 arg1 = ts->info->stack_limit;
9894             }
9895         }
9896         return get_errno(target_mprotect(arg1, arg2, arg3));
9897 #ifdef TARGET_NR_mremap
9898     case TARGET_NR_mremap:
9899         arg1 = cpu_untagged_addr(cpu, arg1);
9900         /* mremap new_addr (arg5) is always untagged */
9901         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9902 #endif
9903         /* ??? msync/mlock/munlock are broken for softmmu.  */
9904 #ifdef TARGET_NR_msync
9905     case TARGET_NR_msync:
9906         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9907 #endif
9908 #ifdef TARGET_NR_mlock
9909     case TARGET_NR_mlock:
9910         return get_errno(mlock(g2h(cpu, arg1), arg2));
9911 #endif
9912 #ifdef TARGET_NR_munlock
9913     case TARGET_NR_munlock:
9914         return get_errno(munlock(g2h(cpu, arg1), arg2));
9915 #endif
9916 #ifdef TARGET_NR_mlockall
9917     case TARGET_NR_mlockall:
9918         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9919 #endif
9920 #ifdef TARGET_NR_munlockall
9921     case TARGET_NR_munlockall:
9922         return get_errno(munlockall());
9923 #endif
9924 #ifdef TARGET_NR_truncate
9925     case TARGET_NR_truncate:
9926         if (!(p = lock_user_string(arg1)))
9927             return -TARGET_EFAULT;
9928         ret = get_errno(truncate(p, arg2));
9929         unlock_user(p, arg1, 0);
9930         return ret;
9931 #endif
9932 #ifdef TARGET_NR_ftruncate
9933     case TARGET_NR_ftruncate:
9934         return get_errno(ftruncate(arg1, arg2));
9935 #endif
9936     case TARGET_NR_fchmod:
9937         return get_errno(fchmod(arg1, arg2));
9938 #if defined(TARGET_NR_fchmodat)
9939     case TARGET_NR_fchmodat:
9940         if (!(p = lock_user_string(arg2)))
9941             return -TARGET_EFAULT;
9942         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9943         unlock_user(p, arg2, 0);
9944         return ret;
9945 #endif
9946     case TARGET_NR_getpriority:
9947         /* Note that negative values are valid for getpriority, so we must
9948            differentiate based on errno settings.  */
9949         errno = 0;
9950         ret = getpriority(arg1, arg2);
9951         if (ret == -1 && errno != 0) {
9952             return -host_to_target_errno(errno);
9953         }
9954 #ifdef TARGET_ALPHA
9955         /* Return value is the unbiased priority.  Signal no error.  */
9956         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9957 #else
9958         /* Return value is a biased priority to avoid negative numbers.  */
9959         ret = 20 - ret;
9960 #endif
9961         return ret;
9962     case TARGET_NR_setpriority:
9963         return get_errno(setpriority(arg1, arg2, arg3));
9964 #ifdef TARGET_NR_statfs
9965     case TARGET_NR_statfs:
9966         if (!(p = lock_user_string(arg1))) {
9967             return -TARGET_EFAULT;
9968         }
9969         ret = get_errno(statfs(path(p), &stfs));
9970         unlock_user(p, arg1, 0);
9971     convert_statfs:
9972         if (!is_error(ret)) {
9973             struct target_statfs *target_stfs;
9974 
9975             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9976                 return -TARGET_EFAULT;
9977             __put_user(stfs.f_type, &target_stfs->f_type);
9978             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9979             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9980             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9981             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9982             __put_user(stfs.f_files, &target_stfs->f_files);
9983             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9984             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9985             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9986             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9987             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9988 #ifdef _STATFS_F_FLAGS
9989             __put_user(stfs.f_flags, &target_stfs->f_flags);
9990 #else
9991             __put_user(0, &target_stfs->f_flags);
9992 #endif
9993             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9994             unlock_user_struct(target_stfs, arg2, 1);
9995         }
9996         return ret;
9997 #endif
9998 #ifdef TARGET_NR_fstatfs
9999     case TARGET_NR_fstatfs:
10000         ret = get_errno(fstatfs(arg1, &stfs));
10001         goto convert_statfs;
10002 #endif
10003 #ifdef TARGET_NR_statfs64
10004     case TARGET_NR_statfs64:
10005         if (!(p = lock_user_string(arg1))) {
10006             return -TARGET_EFAULT;
10007         }
10008         ret = get_errno(statfs(path(p), &stfs));
10009         unlock_user(p, arg1, 0);
10010     convert_statfs64:
10011         if (!is_error(ret)) {
10012             struct target_statfs64 *target_stfs;
10013 
10014             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10015                 return -TARGET_EFAULT;
10016             __put_user(stfs.f_type, &target_stfs->f_type);
10017             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10018             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10019             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10020             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10021             __put_user(stfs.f_files, &target_stfs->f_files);
10022             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10023             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10024             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10025             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10026             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10027 #ifdef _STATFS_F_FLAGS
10028             __put_user(stfs.f_flags, &target_stfs->f_flags);
10029 #else
10030             __put_user(0, &target_stfs->f_flags);
10031 #endif
10032             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10033             unlock_user_struct(target_stfs, arg3, 1);
10034         }
10035         return ret;
10036     case TARGET_NR_fstatfs64:
10037         ret = get_errno(fstatfs(arg1, &stfs));
10038         goto convert_statfs64;
10039 #endif
10040 #ifdef TARGET_NR_socketcall
10041     case TARGET_NR_socketcall:
10042         return do_socketcall(arg1, arg2);
10043 #endif
10044 #ifdef TARGET_NR_accept
10045     case TARGET_NR_accept:
10046         return do_accept4(arg1, arg2, arg3, 0);
10047 #endif
10048 #ifdef TARGET_NR_accept4
10049     case TARGET_NR_accept4:
10050         return do_accept4(arg1, arg2, arg3, arg4);
10051 #endif
10052 #ifdef TARGET_NR_bind
10053     case TARGET_NR_bind:
10054         return do_bind(arg1, arg2, arg3);
10055 #endif
10056 #ifdef TARGET_NR_connect
10057     case TARGET_NR_connect:
10058         return do_connect(arg1, arg2, arg3);
10059 #endif
10060 #ifdef TARGET_NR_getpeername
10061     case TARGET_NR_getpeername:
10062         return do_getpeername(arg1, arg2, arg3);
10063 #endif
10064 #ifdef TARGET_NR_getsockname
10065     case TARGET_NR_getsockname:
10066         return do_getsockname(arg1, arg2, arg3);
10067 #endif
10068 #ifdef TARGET_NR_getsockopt
10069     case TARGET_NR_getsockopt:
10070         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10071 #endif
10072 #ifdef TARGET_NR_listen
10073     case TARGET_NR_listen:
10074         return get_errno(listen(arg1, arg2));
10075 #endif
10076 #ifdef TARGET_NR_recv
10077     case TARGET_NR_recv:
10078         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10079 #endif
10080 #ifdef TARGET_NR_recvfrom
10081     case TARGET_NR_recvfrom:
10082         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10083 #endif
10084 #ifdef TARGET_NR_recvmsg
10085     case TARGET_NR_recvmsg:
10086         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10087 #endif
10088 #ifdef TARGET_NR_send
10089     case TARGET_NR_send:
10090         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10091 #endif
10092 #ifdef TARGET_NR_sendmsg
10093     case TARGET_NR_sendmsg:
10094         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10095 #endif
10096 #ifdef TARGET_NR_sendmmsg
10097     case TARGET_NR_sendmmsg:
10098         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10099 #endif
10100 #ifdef TARGET_NR_recvmmsg
10101     case TARGET_NR_recvmmsg:
10102         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10103 #endif
10104 #ifdef TARGET_NR_sendto
10105     case TARGET_NR_sendto:
10106         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10107 #endif
10108 #ifdef TARGET_NR_shutdown
10109     case TARGET_NR_shutdown:
10110         return get_errno(shutdown(arg1, arg2));
10111 #endif
10112 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10113     case TARGET_NR_getrandom:
10114         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10115         if (!p) {
10116             return -TARGET_EFAULT;
10117         }
10118         ret = get_errno(getrandom(p, arg2, arg3));
10119         unlock_user(p, arg1, ret);
10120         return ret;
10121 #endif
10122 #ifdef TARGET_NR_socket
10123     case TARGET_NR_socket:
10124         return do_socket(arg1, arg2, arg3);
10125 #endif
10126 #ifdef TARGET_NR_socketpair
10127     case TARGET_NR_socketpair:
10128         return do_socketpair(arg1, arg2, arg3, arg4);
10129 #endif
10130 #ifdef TARGET_NR_setsockopt
10131     case TARGET_NR_setsockopt:
10132         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10133 #endif
10134 #if defined(TARGET_NR_syslog)
10135     case TARGET_NR_syslog:
10136         {
10137             int len = arg2;
10138 
10139             switch (arg1) {
10140             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10141             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10142             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10143             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10144             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10145             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10146             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10147             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10148                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10149             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10150             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10151             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10152                 {
10153                     if (len < 0) {
10154                         return -TARGET_EINVAL;
10155                     }
10156                     if (len == 0) {
10157                         return 0;
10158                     }
10159                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10160                     if (!p) {
10161                         return -TARGET_EFAULT;
10162                     }
10163                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10164                     unlock_user(p, arg2, arg3);
10165                 }
10166                 return ret;
10167             default:
10168                 return -TARGET_EINVAL;
10169             }
10170         }
10171         break;
10172 #endif
10173     case TARGET_NR_setitimer:
10174         {
10175             struct itimerval value, ovalue, *pvalue;
10176 
10177             if (arg2) {
10178                 pvalue = &value;
10179                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10180                     || copy_from_user_timeval(&pvalue->it_value,
10181                                               arg2 + sizeof(struct target_timeval)))
10182                     return -TARGET_EFAULT;
10183             } else {
10184                 pvalue = NULL;
10185             }
10186             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10187             if (!is_error(ret) && arg3) {
10188                 if (copy_to_user_timeval(arg3,
10189                                          &ovalue.it_interval)
10190                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10191                                             &ovalue.it_value))
10192                     return -TARGET_EFAULT;
10193             }
10194         }
10195         return ret;
10196     case TARGET_NR_getitimer:
10197         {
10198             struct itimerval value;
10199 
10200             ret = get_errno(getitimer(arg1, &value));
10201             if (!is_error(ret) && arg2) {
10202                 if (copy_to_user_timeval(arg2,
10203                                          &value.it_interval)
10204                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10205                                             &value.it_value))
10206                     return -TARGET_EFAULT;
10207             }
10208         }
10209         return ret;
10210 #ifdef TARGET_NR_stat
10211     case TARGET_NR_stat:
10212         if (!(p = lock_user_string(arg1))) {
10213             return -TARGET_EFAULT;
10214         }
10215         ret = get_errno(stat(path(p), &st));
10216         unlock_user(p, arg1, 0);
10217         goto do_stat;
10218 #endif
10219 #ifdef TARGET_NR_lstat
10220     case TARGET_NR_lstat:
10221         if (!(p = lock_user_string(arg1))) {
10222             return -TARGET_EFAULT;
10223         }
10224         ret = get_errno(lstat(path(p), &st));
10225         unlock_user(p, arg1, 0);
10226         goto do_stat;
10227 #endif
10228 #ifdef TARGET_NR_fstat
10229     case TARGET_NR_fstat:
10230         {
10231             ret = get_errno(fstat(arg1, &st));
10232 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10233         do_stat:
10234 #endif
10235             if (!is_error(ret)) {
10236                 struct target_stat *target_st;
10237 
10238                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10239                     return -TARGET_EFAULT;
10240                 memset(target_st, 0, sizeof(*target_st));
10241                 __put_user(st.st_dev, &target_st->st_dev);
10242                 __put_user(st.st_ino, &target_st->st_ino);
10243                 __put_user(st.st_mode, &target_st->st_mode);
10244                 __put_user(st.st_uid, &target_st->st_uid);
10245                 __put_user(st.st_gid, &target_st->st_gid);
10246                 __put_user(st.st_nlink, &target_st->st_nlink);
10247                 __put_user(st.st_rdev, &target_st->st_rdev);
10248                 __put_user(st.st_size, &target_st->st_size);
10249                 __put_user(st.st_blksize, &target_st->st_blksize);
10250                 __put_user(st.st_blocks, &target_st->st_blocks);
10251                 __put_user(st.st_atime, &target_st->target_st_atime);
10252                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10253                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10254 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10255                 __put_user(st.st_atim.tv_nsec,
10256                            &target_st->target_st_atime_nsec);
10257                 __put_user(st.st_mtim.tv_nsec,
10258                            &target_st->target_st_mtime_nsec);
10259                 __put_user(st.st_ctim.tv_nsec,
10260                            &target_st->target_st_ctime_nsec);
10261 #endif
10262                 unlock_user_struct(target_st, arg2, 1);
10263             }
10264         }
10265         return ret;
10266 #endif
10267     case TARGET_NR_vhangup:
10268         return get_errno(vhangup());
10269 #ifdef TARGET_NR_syscall
10270     case TARGET_NR_syscall:
10271         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10272                           arg6, arg7, arg8, 0);
10273 #endif
10274 #if defined(TARGET_NR_wait4)
10275     case TARGET_NR_wait4:
10276         {
10277             int status;
10278             abi_long status_ptr = arg2;
10279             struct rusage rusage, *rusage_ptr;
10280             abi_ulong target_rusage = arg4;
10281             abi_long rusage_err;
10282             if (target_rusage)
10283                 rusage_ptr = &rusage;
10284             else
10285                 rusage_ptr = NULL;
10286             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10287             if (!is_error(ret)) {
10288                 if (status_ptr && ret) {
10289                     status = host_to_target_waitstatus(status);
10290                     if (put_user_s32(status, status_ptr))
10291                         return -TARGET_EFAULT;
10292                 }
10293                 if (target_rusage) {
10294                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10295                     if (rusage_err) {
10296                         ret = rusage_err;
10297                     }
10298                 }
10299             }
10300         }
10301         return ret;
10302 #endif
10303 #ifdef TARGET_NR_swapoff
10304     case TARGET_NR_swapoff:
10305         if (!(p = lock_user_string(arg1)))
10306             return -TARGET_EFAULT;
10307         ret = get_errno(swapoff(p));
10308         unlock_user(p, arg1, 0);
10309         return ret;
10310 #endif
10311     case TARGET_NR_sysinfo:
10312         {
10313             struct target_sysinfo *target_value;
10314             struct sysinfo value;
10315             ret = get_errno(sysinfo(&value));
10316             if (!is_error(ret) && arg1)
10317             {
10318                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10319                     return -TARGET_EFAULT;
10320                 __put_user(value.uptime, &target_value->uptime);
10321                 __put_user(value.loads[0], &target_value->loads[0]);
10322                 __put_user(value.loads[1], &target_value->loads[1]);
10323                 __put_user(value.loads[2], &target_value->loads[2]);
10324                 __put_user(value.totalram, &target_value->totalram);
10325                 __put_user(value.freeram, &target_value->freeram);
10326                 __put_user(value.sharedram, &target_value->sharedram);
10327                 __put_user(value.bufferram, &target_value->bufferram);
10328                 __put_user(value.totalswap, &target_value->totalswap);
10329                 __put_user(value.freeswap, &target_value->freeswap);
10330                 __put_user(value.procs, &target_value->procs);
10331                 __put_user(value.totalhigh, &target_value->totalhigh);
10332                 __put_user(value.freehigh, &target_value->freehigh);
10333                 __put_user(value.mem_unit, &target_value->mem_unit);
10334                 unlock_user_struct(target_value, arg1, 1);
10335             }
10336         }
10337         return ret;
10338 #ifdef TARGET_NR_ipc
10339     case TARGET_NR_ipc:
10340         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10341 #endif
10342 #ifdef TARGET_NR_semget
10343     case TARGET_NR_semget:
10344         return get_errno(semget(arg1, arg2, arg3));
10345 #endif
10346 #ifdef TARGET_NR_semop
10347     case TARGET_NR_semop:
10348         return do_semtimedop(arg1, arg2, arg3, 0, false);
10349 #endif
10350 #ifdef TARGET_NR_semtimedop
10351     case TARGET_NR_semtimedop:
10352         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10353 #endif
10354 #ifdef TARGET_NR_semtimedop_time64
10355     case TARGET_NR_semtimedop_time64:
10356         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10357 #endif
10358 #ifdef TARGET_NR_semctl
10359     case TARGET_NR_semctl:
10360         return do_semctl(arg1, arg2, arg3, arg4);
10361 #endif
10362 #ifdef TARGET_NR_msgctl
10363     case TARGET_NR_msgctl:
10364         return do_msgctl(arg1, arg2, arg3);
10365 #endif
10366 #ifdef TARGET_NR_msgget
10367     case TARGET_NR_msgget:
10368         return get_errno(msgget(arg1, arg2));
10369 #endif
10370 #ifdef TARGET_NR_msgrcv
10371     case TARGET_NR_msgrcv:
10372         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10373 #endif
10374 #ifdef TARGET_NR_msgsnd
10375     case TARGET_NR_msgsnd:
10376         return do_msgsnd(arg1, arg2, arg3, arg4);
10377 #endif
10378 #ifdef TARGET_NR_shmget
10379     case TARGET_NR_shmget:
10380         return get_errno(shmget(arg1, arg2, arg3));
10381 #endif
10382 #ifdef TARGET_NR_shmctl
10383     case TARGET_NR_shmctl:
10384         return do_shmctl(arg1, arg2, arg3);
10385 #endif
10386 #ifdef TARGET_NR_shmat
10387     case TARGET_NR_shmat:
10388         return do_shmat(cpu_env, arg1, arg2, arg3);
10389 #endif
10390 #ifdef TARGET_NR_shmdt
10391     case TARGET_NR_shmdt:
10392         return do_shmdt(arg1);
10393 #endif
10394     case TARGET_NR_fsync:
10395         return get_errno(fsync(arg1));
10396     case TARGET_NR_clone:
10397         /* Linux manages to have three different orderings for its
10398          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10399          * match the kernel's CONFIG_CLONE_* settings.
10400          * Microblaze is further special in that it uses a sixth
10401          * implicit argument to clone for the TLS pointer.
10402          */
10403 #if defined(TARGET_MICROBLAZE)
10404         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10405 #elif defined(TARGET_CLONE_BACKWARDS)
10406         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10407 #elif defined(TARGET_CLONE_BACKWARDS2)
10408         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10409 #else
10410         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10411 #endif
10412         return ret;
10413 #ifdef __NR_exit_group
10414         /* new thread calls */
10415     case TARGET_NR_exit_group:
10416         preexit_cleanup(cpu_env, arg1);
10417         return get_errno(exit_group(arg1));
10418 #endif
10419     case TARGET_NR_setdomainname:
10420         if (!(p = lock_user_string(arg1)))
10421             return -TARGET_EFAULT;
10422         ret = get_errno(setdomainname(p, arg2));
10423         unlock_user(p, arg1, 0);
10424         return ret;
10425     case TARGET_NR_uname:
10426         /* no need to transcode because we use the linux syscall */
10427         {
10428             struct new_utsname * buf;
10429 
10430             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10431                 return -TARGET_EFAULT;
10432             ret = get_errno(sys_uname(buf));
10433             if (!is_error(ret)) {
10434                 /* Overwrite the native machine name with whatever is being
10435                    emulated. */
10436                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10437                           sizeof(buf->machine));
10438                 /* Allow the user to override the reported release.  */
10439                 if (qemu_uname_release && *qemu_uname_release) {
10440                     g_strlcpy(buf->release, qemu_uname_release,
10441                               sizeof(buf->release));
10442                 }
10443             }
10444             unlock_user_struct(buf, arg1, 1);
10445         }
10446         return ret;
10447 #ifdef TARGET_I386
10448     case TARGET_NR_modify_ldt:
10449         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10450 #if !defined(TARGET_X86_64)
10451     case TARGET_NR_vm86:
10452         return do_vm86(cpu_env, arg1, arg2);
10453 #endif
10454 #endif
10455 #if defined(TARGET_NR_adjtimex)
10456     case TARGET_NR_adjtimex:
10457         {
10458             struct timex host_buf;
10459 
10460             if (target_to_host_timex(&host_buf, arg1) != 0) {
10461                 return -TARGET_EFAULT;
10462             }
10463             ret = get_errno(adjtimex(&host_buf));
10464             if (!is_error(ret)) {
10465                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10466                     return -TARGET_EFAULT;
10467                 }
10468             }
10469         }
10470         return ret;
10471 #endif
10472 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10473     case TARGET_NR_clock_adjtime:
10474         {
10475             struct timex htx, *phtx = &htx;
10476 
10477             if (target_to_host_timex(phtx, arg2) != 0) {
10478                 return -TARGET_EFAULT;
10479             }
10480             ret = get_errno(clock_adjtime(arg1, phtx));
10481             if (!is_error(ret) && phtx) {
10482                 if (host_to_target_timex(arg2, phtx) != 0) {
10483                     return -TARGET_EFAULT;
10484                 }
10485             }
10486         }
10487         return ret;
10488 #endif
10489 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10490     case TARGET_NR_clock_adjtime64:
10491         {
10492             struct timex htx;
10493 
10494             if (target_to_host_timex64(&htx, arg2) != 0) {
10495                 return -TARGET_EFAULT;
10496             }
10497             ret = get_errno(clock_adjtime(arg1, &htx));
10498             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10499                     return -TARGET_EFAULT;
10500             }
10501         }
10502         return ret;
10503 #endif
10504     case TARGET_NR_getpgid:
10505         return get_errno(getpgid(arg1));
10506     case TARGET_NR_fchdir:
10507         return get_errno(fchdir(arg1));
10508     case TARGET_NR_personality:
10509         return get_errno(personality(arg1));
10510 #ifdef TARGET_NR__llseek /* Not on alpha */
10511     case TARGET_NR__llseek:
10512         {
10513             int64_t res;
10514 #if !defined(__NR_llseek)
10515             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10516             if (res == -1) {
10517                 ret = get_errno(res);
10518             } else {
10519                 ret = 0;
10520             }
10521 #else
10522             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10523 #endif
10524             if ((ret == 0) && put_user_s64(res, arg4)) {
10525                 return -TARGET_EFAULT;
10526             }
10527         }
10528         return ret;
10529 #endif
10530 #ifdef TARGET_NR_getdents
10531     case TARGET_NR_getdents:
10532         return do_getdents(arg1, arg2, arg3);
10533 #endif /* TARGET_NR_getdents */
10534 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10535     case TARGET_NR_getdents64:
10536         return do_getdents64(arg1, arg2, arg3);
10537 #endif /* TARGET_NR_getdents64 */
10538 #if defined(TARGET_NR__newselect)
10539     case TARGET_NR__newselect:
10540         return do_select(arg1, arg2, arg3, arg4, arg5);
10541 #endif
10542 #ifdef TARGET_NR_poll
10543     case TARGET_NR_poll:
10544         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10545 #endif
10546 #ifdef TARGET_NR_ppoll
10547     case TARGET_NR_ppoll:
10548         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10549 #endif
10550 #ifdef TARGET_NR_ppoll_time64
10551     case TARGET_NR_ppoll_time64:
10552         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10553 #endif
10554     case TARGET_NR_flock:
10555         /* NOTE: the flock constant seems to be the same for every
10556            Linux platform */
10557         return get_errno(safe_flock(arg1, arg2));
10558     case TARGET_NR_readv:
10559         {
10560             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10561             if (vec != NULL) {
10562                 ret = get_errno(safe_readv(arg1, vec, arg3));
10563                 unlock_iovec(vec, arg2, arg3, 1);
10564             } else {
10565                 ret = -host_to_target_errno(errno);
10566             }
10567         }
10568         return ret;
10569     case TARGET_NR_writev:
10570         {
10571             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10572             if (vec != NULL) {
10573                 ret = get_errno(safe_writev(arg1, vec, arg3));
10574                 unlock_iovec(vec, arg2, arg3, 0);
10575             } else {
10576                 ret = -host_to_target_errno(errno);
10577             }
10578         }
10579         return ret;
10580 #if defined(TARGET_NR_preadv)
10581     case TARGET_NR_preadv:
10582         {
10583             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10584             if (vec != NULL) {
10585                 unsigned long low, high;
10586 
10587                 target_to_host_low_high(arg4, arg5, &low, &high);
10588                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10589                 unlock_iovec(vec, arg2, arg3, 1);
10590             } else {
10591                 ret = -host_to_target_errno(errno);
10592            }
10593         }
10594         return ret;
10595 #endif
10596 #if defined(TARGET_NR_pwritev)
10597     case TARGET_NR_pwritev:
10598         {
10599             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10600             if (vec != NULL) {
10601                 unsigned long low, high;
10602 
10603                 target_to_host_low_high(arg4, arg5, &low, &high);
10604                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10605                 unlock_iovec(vec, arg2, arg3, 0);
10606             } else {
10607                 ret = -host_to_target_errno(errno);
10608            }
10609         }
10610         return ret;
10611 #endif
10612     case TARGET_NR_getsid:
10613         return get_errno(getsid(arg1));
10614 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10615     case TARGET_NR_fdatasync:
10616         return get_errno(fdatasync(arg1));
10617 #endif
10618     case TARGET_NR_sched_getaffinity:
10619         {
10620             unsigned int mask_size;
10621             unsigned long *mask;
10622 
10623             /*
10624              * sched_getaffinity needs multiples of ulong, so need to take
10625              * care of mismatches between target ulong and host ulong sizes.
10626              */
10627             if (arg2 & (sizeof(abi_ulong) - 1)) {
10628                 return -TARGET_EINVAL;
10629             }
10630             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10631 
10632             mask = alloca(mask_size);
10633             memset(mask, 0, mask_size);
10634             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10635 
10636             if (!is_error(ret)) {
10637                 if (ret > arg2) {
10638                     /* More data returned than the caller's buffer will fit.
10639                      * This only happens if sizeof(abi_long) < sizeof(long)
10640                      * and the caller passed us a buffer holding an odd number
10641                      * of abi_longs. If the host kernel is actually using the
10642                      * extra 4 bytes then fail EINVAL; otherwise we can just
10643                      * ignore them and only copy the interesting part.
10644                      */
10645                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10646                     if (numcpus > arg2 * 8) {
10647                         return -TARGET_EINVAL;
10648                     }
10649                     ret = arg2;
10650                 }
10651 
10652                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10653                     return -TARGET_EFAULT;
10654                 }
10655             }
10656         }
10657         return ret;
10658     case TARGET_NR_sched_setaffinity:
10659         {
10660             unsigned int mask_size;
10661             unsigned long *mask;
10662 
10663             /*
10664              * sched_setaffinity needs multiples of ulong, so need to take
10665              * care of mismatches between target ulong and host ulong sizes.
10666              */
10667             if (arg2 & (sizeof(abi_ulong) - 1)) {
10668                 return -TARGET_EINVAL;
10669             }
10670             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10671             mask = alloca(mask_size);
10672 
10673             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10674             if (ret) {
10675                 return ret;
10676             }
10677 
10678             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10679         }
10680     case TARGET_NR_getcpu:
10681         {
10682             unsigned cpu, node;
10683             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10684                                        arg2 ? &node : NULL,
10685                                        NULL));
10686             if (is_error(ret)) {
10687                 return ret;
10688             }
10689             if (arg1 && put_user_u32(cpu, arg1)) {
10690                 return -TARGET_EFAULT;
10691             }
10692             if (arg2 && put_user_u32(node, arg2)) {
10693                 return -TARGET_EFAULT;
10694             }
10695         }
10696         return ret;
10697     case TARGET_NR_sched_setparam:
10698         {
10699             struct sched_param *target_schp;
10700             struct sched_param schp;
10701 
10702             if (arg2 == 0) {
10703                 return -TARGET_EINVAL;
10704             }
10705             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10706                 return -TARGET_EFAULT;
10707             schp.sched_priority = tswap32(target_schp->sched_priority);
10708             unlock_user_struct(target_schp, arg2, 0);
10709             return get_errno(sched_setparam(arg1, &schp));
10710         }
10711     case TARGET_NR_sched_getparam:
10712         {
10713             struct sched_param *target_schp;
10714             struct sched_param schp;
10715 
10716             if (arg2 == 0) {
10717                 return -TARGET_EINVAL;
10718             }
10719             ret = get_errno(sched_getparam(arg1, &schp));
10720             if (!is_error(ret)) {
10721                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10722                     return -TARGET_EFAULT;
10723                 target_schp->sched_priority = tswap32(schp.sched_priority);
10724                 unlock_user_struct(target_schp, arg2, 1);
10725             }
10726         }
10727         return ret;
10728     case TARGET_NR_sched_setscheduler:
10729         {
10730             struct sched_param *target_schp;
10731             struct sched_param schp;
10732             if (arg3 == 0) {
10733                 return -TARGET_EINVAL;
10734             }
10735             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10736                 return -TARGET_EFAULT;
10737             schp.sched_priority = tswap32(target_schp->sched_priority);
10738             unlock_user_struct(target_schp, arg3, 0);
10739             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10740         }
10741     case TARGET_NR_sched_getscheduler:
10742         return get_errno(sched_getscheduler(arg1));
10743     case TARGET_NR_sched_yield:
10744         return get_errno(sched_yield());
10745     case TARGET_NR_sched_get_priority_max:
10746         return get_errno(sched_get_priority_max(arg1));
10747     case TARGET_NR_sched_get_priority_min:
10748         return get_errno(sched_get_priority_min(arg1));
10749 #ifdef TARGET_NR_sched_rr_get_interval
10750     case TARGET_NR_sched_rr_get_interval:
10751         {
10752             struct timespec ts;
10753             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10754             if (!is_error(ret)) {
10755                 ret = host_to_target_timespec(arg2, &ts);
10756             }
10757         }
10758         return ret;
10759 #endif
10760 #ifdef TARGET_NR_sched_rr_get_interval_time64
10761     case TARGET_NR_sched_rr_get_interval_time64:
10762         {
10763             struct timespec ts;
10764             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10765             if (!is_error(ret)) {
10766                 ret = host_to_target_timespec64(arg2, &ts);
10767             }
10768         }
10769         return ret;
10770 #endif
10771 #if defined(TARGET_NR_nanosleep)
10772     case TARGET_NR_nanosleep:
10773         {
10774             struct timespec req, rem;
10775             target_to_host_timespec(&req, arg1);
10776             ret = get_errno(safe_nanosleep(&req, &rem));
10777             if (is_error(ret) && arg2) {
10778                 host_to_target_timespec(arg2, &rem);
10779             }
10780         }
10781         return ret;
10782 #endif
10783     case TARGET_NR_prctl:
10784         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10785         break;
10786 #ifdef TARGET_NR_arch_prctl
10787     case TARGET_NR_arch_prctl:
10788         return do_arch_prctl(cpu_env, arg1, arg2);
10789 #endif
10790 #ifdef TARGET_NR_pread64
10791     case TARGET_NR_pread64:
10792         if (regpairs_aligned(cpu_env, num)) {
10793             arg4 = arg5;
10794             arg5 = arg6;
10795         }
10796         if (arg2 == 0 && arg3 == 0) {
10797             /* Special-case NULL buffer and zero length, which should succeed */
10798             p = 0;
10799         } else {
10800             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10801             if (!p) {
10802                 return -TARGET_EFAULT;
10803             }
10804         }
10805         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10806         unlock_user(p, arg2, ret);
10807         return ret;
10808     case TARGET_NR_pwrite64:
10809         if (regpairs_aligned(cpu_env, num)) {
10810             arg4 = arg5;
10811             arg5 = arg6;
10812         }
10813         if (arg2 == 0 && arg3 == 0) {
10814             /* Special-case NULL buffer and zero length, which should succeed */
10815             p = 0;
10816         } else {
10817             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10818             if (!p) {
10819                 return -TARGET_EFAULT;
10820             }
10821         }
10822         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10823         unlock_user(p, arg2, 0);
10824         return ret;
10825 #endif
10826     case TARGET_NR_getcwd:
10827         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10828             return -TARGET_EFAULT;
10829         ret = get_errno(sys_getcwd1(p, arg2));
10830         unlock_user(p, arg1, ret);
10831         return ret;
10832     case TARGET_NR_capget:
10833     case TARGET_NR_capset:
10834     {
10835         struct target_user_cap_header *target_header;
10836         struct target_user_cap_data *target_data = NULL;
10837         struct __user_cap_header_struct header;
10838         struct __user_cap_data_struct data[2];
10839         struct __user_cap_data_struct *dataptr = NULL;
10840         int i, target_datalen;
10841         int data_items = 1;
10842 
10843         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10844             return -TARGET_EFAULT;
10845         }
10846         header.version = tswap32(target_header->version);
10847         header.pid = tswap32(target_header->pid);
10848 
10849         if (header.version != _LINUX_CAPABILITY_VERSION) {
10850             /* Version 2 and up takes pointer to two user_data structs */
10851             data_items = 2;
10852         }
10853 
10854         target_datalen = sizeof(*target_data) * data_items;
10855 
10856         if (arg2) {
10857             if (num == TARGET_NR_capget) {
10858                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10859             } else {
10860                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10861             }
10862             if (!target_data) {
10863                 unlock_user_struct(target_header, arg1, 0);
10864                 return -TARGET_EFAULT;
10865             }
10866 
10867             if (num == TARGET_NR_capset) {
10868                 for (i = 0; i < data_items; i++) {
10869                     data[i].effective = tswap32(target_data[i].effective);
10870                     data[i].permitted = tswap32(target_data[i].permitted);
10871                     data[i].inheritable = tswap32(target_data[i].inheritable);
10872                 }
10873             }
10874 
10875             dataptr = data;
10876         }
10877 
10878         if (num == TARGET_NR_capget) {
10879             ret = get_errno(capget(&header, dataptr));
10880         } else {
10881             ret = get_errno(capset(&header, dataptr));
10882         }
10883 
10884         /* The kernel always updates version for both capget and capset */
10885         target_header->version = tswap32(header.version);
10886         unlock_user_struct(target_header, arg1, 1);
10887 
10888         if (arg2) {
10889             if (num == TARGET_NR_capget) {
10890                 for (i = 0; i < data_items; i++) {
10891                     target_data[i].effective = tswap32(data[i].effective);
10892                     target_data[i].permitted = tswap32(data[i].permitted);
10893                     target_data[i].inheritable = tswap32(data[i].inheritable);
10894                 }
10895                 unlock_user(target_data, arg2, target_datalen);
10896             } else {
10897                 unlock_user(target_data, arg2, 0);
10898             }
10899         }
10900         return ret;
10901     }
10902     case TARGET_NR_sigaltstack:
10903         return do_sigaltstack(arg1, arg2, cpu_env);
10904 
10905 #ifdef CONFIG_SENDFILE
10906 #ifdef TARGET_NR_sendfile
10907     case TARGET_NR_sendfile:
10908     {
10909         off_t *offp = NULL;
10910         off_t off;
10911         if (arg3) {
10912             ret = get_user_sal(off, arg3);
10913             if (is_error(ret)) {
10914                 return ret;
10915             }
10916             offp = &off;
10917         }
10918         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10919         if (!is_error(ret) && arg3) {
10920             abi_long ret2 = put_user_sal(off, arg3);
10921             if (is_error(ret2)) {
10922                 ret = ret2;
10923             }
10924         }
10925         return ret;
10926     }
10927 #endif
10928 #ifdef TARGET_NR_sendfile64
10929     case TARGET_NR_sendfile64:
10930     {
10931         off_t *offp = NULL;
10932         off_t off;
10933         if (arg3) {
10934             ret = get_user_s64(off, arg3);
10935             if (is_error(ret)) {
10936                 return ret;
10937             }
10938             offp = &off;
10939         }
10940         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10941         if (!is_error(ret) && arg3) {
10942             abi_long ret2 = put_user_s64(off, arg3);
10943             if (is_error(ret2)) {
10944                 ret = ret2;
10945             }
10946         }
10947         return ret;
10948     }
10949 #endif
10950 #endif
10951 #ifdef TARGET_NR_vfork
10952     case TARGET_NR_vfork:
10953         return get_errno(do_fork(cpu_env,
10954                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
10955                          0, 0, 0, 0));
10956 #endif
10957 #ifdef TARGET_NR_ugetrlimit
10958     case TARGET_NR_ugetrlimit:
10959     {
10960 	struct rlimit rlim;
10961 	int resource = target_to_host_resource(arg1);
10962 	ret = get_errno(getrlimit(resource, &rlim));
10963 	if (!is_error(ret)) {
10964 	    struct target_rlimit *target_rlim;
10965             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10966                 return -TARGET_EFAULT;
10967 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10968 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10969             unlock_user_struct(target_rlim, arg2, 1);
10970 	}
10971         return ret;
10972     }
10973 #endif
10974 #ifdef TARGET_NR_truncate64
10975     case TARGET_NR_truncate64:
10976         if (!(p = lock_user_string(arg1)))
10977             return -TARGET_EFAULT;
10978 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10979         unlock_user(p, arg1, 0);
10980         return ret;
10981 #endif
10982 #ifdef TARGET_NR_ftruncate64
10983     case TARGET_NR_ftruncate64:
10984         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10985 #endif
10986 #ifdef TARGET_NR_stat64
10987     case TARGET_NR_stat64:
10988         if (!(p = lock_user_string(arg1))) {
10989             return -TARGET_EFAULT;
10990         }
10991         ret = get_errno(stat(path(p), &st));
10992         unlock_user(p, arg1, 0);
10993         if (!is_error(ret))
10994             ret = host_to_target_stat64(cpu_env, arg2, &st);
10995         return ret;
10996 #endif
10997 #ifdef TARGET_NR_lstat64
10998     case TARGET_NR_lstat64:
10999         if (!(p = lock_user_string(arg1))) {
11000             return -TARGET_EFAULT;
11001         }
11002         ret = get_errno(lstat(path(p), &st));
11003         unlock_user(p, arg1, 0);
11004         if (!is_error(ret))
11005             ret = host_to_target_stat64(cpu_env, arg2, &st);
11006         return ret;
11007 #endif
11008 #ifdef TARGET_NR_fstat64
11009     case TARGET_NR_fstat64:
11010         ret = get_errno(fstat(arg1, &st));
11011         if (!is_error(ret))
11012             ret = host_to_target_stat64(cpu_env, arg2, &st);
11013         return ret;
11014 #endif
11015 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11016 #ifdef TARGET_NR_fstatat64
11017     case TARGET_NR_fstatat64:
11018 #endif
11019 #ifdef TARGET_NR_newfstatat
11020     case TARGET_NR_newfstatat:
11021 #endif
11022         if (!(p = lock_user_string(arg2))) {
11023             return -TARGET_EFAULT;
11024         }
11025         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11026         unlock_user(p, arg2, 0);
11027         if (!is_error(ret))
11028             ret = host_to_target_stat64(cpu_env, arg3, &st);
11029         return ret;
11030 #endif
11031 #if defined(TARGET_NR_statx)
11032     case TARGET_NR_statx:
11033         {
11034             struct target_statx *target_stx;
11035             int dirfd = arg1;
11036             int flags = arg3;
11037 
11038             p = lock_user_string(arg2);
11039             if (p == NULL) {
11040                 return -TARGET_EFAULT;
11041             }
11042 #if defined(__NR_statx)
11043             {
11044                 /*
11045                  * It is assumed that struct statx is architecture independent.
11046                  */
11047                 struct target_statx host_stx;
11048                 int mask = arg4;
11049 
11050                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11051                 if (!is_error(ret)) {
11052                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11053                         unlock_user(p, arg2, 0);
11054                         return -TARGET_EFAULT;
11055                     }
11056                 }
11057 
11058                 if (ret != -TARGET_ENOSYS) {
11059                     unlock_user(p, arg2, 0);
11060                     return ret;
11061                 }
11062             }
11063 #endif
11064             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11065             unlock_user(p, arg2, 0);
11066 
11067             if (!is_error(ret)) {
11068                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11069                     return -TARGET_EFAULT;
11070                 }
11071                 memset(target_stx, 0, sizeof(*target_stx));
11072                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11073                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11074                 __put_user(st.st_ino, &target_stx->stx_ino);
11075                 __put_user(st.st_mode, &target_stx->stx_mode);
11076                 __put_user(st.st_uid, &target_stx->stx_uid);
11077                 __put_user(st.st_gid, &target_stx->stx_gid);
11078                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11079                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11080                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11081                 __put_user(st.st_size, &target_stx->stx_size);
11082                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11083                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11084                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11085                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11086                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11087                 unlock_user_struct(target_stx, arg5, 1);
11088             }
11089         }
11090         return ret;
11091 #endif
11092 #ifdef TARGET_NR_lchown
11093     case TARGET_NR_lchown:
11094         if (!(p = lock_user_string(arg1)))
11095             return -TARGET_EFAULT;
11096         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11097         unlock_user(p, arg1, 0);
11098         return ret;
11099 #endif
11100 #ifdef TARGET_NR_getuid
11101     case TARGET_NR_getuid:
11102         return get_errno(high2lowuid(getuid()));
11103 #endif
11104 #ifdef TARGET_NR_getgid
11105     case TARGET_NR_getgid:
11106         return get_errno(high2lowgid(getgid()));
11107 #endif
11108 #ifdef TARGET_NR_geteuid
11109     case TARGET_NR_geteuid:
11110         return get_errno(high2lowuid(geteuid()));
11111 #endif
11112 #ifdef TARGET_NR_getegid
11113     case TARGET_NR_getegid:
11114         return get_errno(high2lowgid(getegid()));
11115 #endif
11116     case TARGET_NR_setreuid:
11117         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11118     case TARGET_NR_setregid:
11119         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11120     case TARGET_NR_getgroups:
11121         {
11122             int gidsetsize = arg1;
11123             target_id *target_grouplist;
11124             gid_t *grouplist;
11125             int i;
11126 
11127             grouplist = alloca(gidsetsize * sizeof(gid_t));
11128             ret = get_errno(getgroups(gidsetsize, grouplist));
11129             if (gidsetsize == 0)
11130                 return ret;
11131             if (!is_error(ret)) {
11132                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11133                 if (!target_grouplist)
11134                     return -TARGET_EFAULT;
11135                 for(i = 0;i < ret; i++)
11136                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11137                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11138             }
11139         }
11140         return ret;
11141     case TARGET_NR_setgroups:
11142         {
11143             int gidsetsize = arg1;
11144             target_id *target_grouplist;
11145             gid_t *grouplist = NULL;
11146             int i;
11147             if (gidsetsize) {
11148                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11149                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11150                 if (!target_grouplist) {
11151                     return -TARGET_EFAULT;
11152                 }
11153                 for (i = 0; i < gidsetsize; i++) {
11154                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11155                 }
11156                 unlock_user(target_grouplist, arg2, 0);
11157             }
11158             return get_errno(setgroups(gidsetsize, grouplist));
11159         }
11160     case TARGET_NR_fchown:
11161         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11162 #if defined(TARGET_NR_fchownat)
11163     case TARGET_NR_fchownat:
11164         if (!(p = lock_user_string(arg2)))
11165             return -TARGET_EFAULT;
11166         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11167                                  low2highgid(arg4), arg5));
11168         unlock_user(p, arg2, 0);
11169         return ret;
11170 #endif
11171 #ifdef TARGET_NR_setresuid
11172     case TARGET_NR_setresuid:
11173         return get_errno(sys_setresuid(low2highuid(arg1),
11174                                        low2highuid(arg2),
11175                                        low2highuid(arg3)));
11176 #endif
11177 #ifdef TARGET_NR_getresuid
11178     case TARGET_NR_getresuid:
11179         {
11180             uid_t ruid, euid, suid;
11181             ret = get_errno(getresuid(&ruid, &euid, &suid));
11182             if (!is_error(ret)) {
11183                 if (put_user_id(high2lowuid(ruid), arg1)
11184                     || put_user_id(high2lowuid(euid), arg2)
11185                     || put_user_id(high2lowuid(suid), arg3))
11186                     return -TARGET_EFAULT;
11187             }
11188         }
11189         return ret;
11190 #endif
11191 #ifdef TARGET_NR_getresgid
11192     case TARGET_NR_setresgid:
11193         return get_errno(sys_setresgid(low2highgid(arg1),
11194                                        low2highgid(arg2),
11195                                        low2highgid(arg3)));
11196 #endif
11197 #ifdef TARGET_NR_getresgid
11198     case TARGET_NR_getresgid:
11199         {
11200             gid_t rgid, egid, sgid;
11201             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11202             if (!is_error(ret)) {
11203                 if (put_user_id(high2lowgid(rgid), arg1)
11204                     || put_user_id(high2lowgid(egid), arg2)
11205                     || put_user_id(high2lowgid(sgid), arg3))
11206                     return -TARGET_EFAULT;
11207             }
11208         }
11209         return ret;
11210 #endif
11211 #ifdef TARGET_NR_chown
11212     case TARGET_NR_chown:
11213         if (!(p = lock_user_string(arg1)))
11214             return -TARGET_EFAULT;
11215         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11216         unlock_user(p, arg1, 0);
11217         return ret;
11218 #endif
11219     case TARGET_NR_setuid:
11220         return get_errno(sys_setuid(low2highuid(arg1)));
11221     case TARGET_NR_setgid:
11222         return get_errno(sys_setgid(low2highgid(arg1)));
11223     case TARGET_NR_setfsuid:
11224         return get_errno(setfsuid(arg1));
11225     case TARGET_NR_setfsgid:
11226         return get_errno(setfsgid(arg1));
11227 
11228 #ifdef TARGET_NR_lchown32
11229     case TARGET_NR_lchown32:
11230         if (!(p = lock_user_string(arg1)))
11231             return -TARGET_EFAULT;
11232         ret = get_errno(lchown(p, arg2, arg3));
11233         unlock_user(p, arg1, 0);
11234         return ret;
11235 #endif
11236 #ifdef TARGET_NR_getuid32
11237     case TARGET_NR_getuid32:
11238         return get_errno(getuid());
11239 #endif
11240 
11241 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11242    /* Alpha specific */
11243     case TARGET_NR_getxuid:
11244          {
11245             uid_t euid;
11246             euid=geteuid();
11247             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11248          }
11249         return get_errno(getuid());
11250 #endif
11251 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11252    /* Alpha specific */
11253     case TARGET_NR_getxgid:
11254          {
11255             uid_t egid;
11256             egid=getegid();
11257             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11258          }
11259         return get_errno(getgid());
11260 #endif
11261 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11262     /* Alpha specific */
11263     case TARGET_NR_osf_getsysinfo:
11264         ret = -TARGET_EOPNOTSUPP;
11265         switch (arg1) {
11266           case TARGET_GSI_IEEE_FP_CONTROL:
11267             {
11268                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11269                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11270 
11271                 swcr &= ~SWCR_STATUS_MASK;
11272                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11273 
11274                 if (put_user_u64 (swcr, arg2))
11275                         return -TARGET_EFAULT;
11276                 ret = 0;
11277             }
11278             break;
11279 
11280           /* case GSI_IEEE_STATE_AT_SIGNAL:
11281              -- Not implemented in linux kernel.
11282              case GSI_UACPROC:
11283              -- Retrieves current unaligned access state; not much used.
11284              case GSI_PROC_TYPE:
11285              -- Retrieves implver information; surely not used.
11286              case GSI_GET_HWRPB:
11287              -- Grabs a copy of the HWRPB; surely not used.
11288           */
11289         }
11290         return ret;
11291 #endif
11292 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11293     /* Alpha specific */
11294     case TARGET_NR_osf_setsysinfo:
11295         ret = -TARGET_EOPNOTSUPP;
11296         switch (arg1) {
11297           case TARGET_SSI_IEEE_FP_CONTROL:
11298             {
11299                 uint64_t swcr, fpcr;
11300 
11301                 if (get_user_u64 (swcr, arg2)) {
11302                     return -TARGET_EFAULT;
11303                 }
11304 
11305                 /*
11306                  * The kernel calls swcr_update_status to update the
11307                  * status bits from the fpcr at every point that it
11308                  * could be queried.  Therefore, we store the status
11309                  * bits only in FPCR.
11310                  */
11311                 ((CPUAlphaState *)cpu_env)->swcr
11312                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11313 
11314                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11315                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11316                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11317                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11318                 ret = 0;
11319             }
11320             break;
11321 
11322           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11323             {
11324                 uint64_t exc, fpcr, fex;
11325 
11326                 if (get_user_u64(exc, arg2)) {
11327                     return -TARGET_EFAULT;
11328                 }
11329                 exc &= SWCR_STATUS_MASK;
11330                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11331 
11332                 /* Old exceptions are not signaled.  */
11333                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11334                 fex = exc & ~fex;
11335                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11336                 fex &= ((CPUArchState *)cpu_env)->swcr;
11337 
11338                 /* Update the hardware fpcr.  */
11339                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11340                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11341 
11342                 if (fex) {
11343                     int si_code = TARGET_FPE_FLTUNK;
11344                     target_siginfo_t info;
11345 
11346                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11347                         si_code = TARGET_FPE_FLTUND;
11348                     }
11349                     if (fex & SWCR_TRAP_ENABLE_INE) {
11350                         si_code = TARGET_FPE_FLTRES;
11351                     }
11352                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11353                         si_code = TARGET_FPE_FLTUND;
11354                     }
11355                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11356                         si_code = TARGET_FPE_FLTOVF;
11357                     }
11358                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11359                         si_code = TARGET_FPE_FLTDIV;
11360                     }
11361                     if (fex & SWCR_TRAP_ENABLE_INV) {
11362                         si_code = TARGET_FPE_FLTINV;
11363                     }
11364 
11365                     info.si_signo = SIGFPE;
11366                     info.si_errno = 0;
11367                     info.si_code = si_code;
11368                     info._sifields._sigfault._addr
11369                         = ((CPUArchState *)cpu_env)->pc;
11370                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11371                                  QEMU_SI_FAULT, &info);
11372                 }
11373                 ret = 0;
11374             }
11375             break;
11376 
11377           /* case SSI_NVPAIRS:
11378              -- Used with SSIN_UACPROC to enable unaligned accesses.
11379              case SSI_IEEE_STATE_AT_SIGNAL:
11380              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11381              -- Not implemented in linux kernel
11382           */
11383         }
11384         return ret;
11385 #endif
11386 #ifdef TARGET_NR_osf_sigprocmask
11387     /* Alpha specific.  */
11388     case TARGET_NR_osf_sigprocmask:
11389         {
11390             abi_ulong mask;
11391             int how;
11392             sigset_t set, oldset;
11393 
11394             switch(arg1) {
11395             case TARGET_SIG_BLOCK:
11396                 how = SIG_BLOCK;
11397                 break;
11398             case TARGET_SIG_UNBLOCK:
11399                 how = SIG_UNBLOCK;
11400                 break;
11401             case TARGET_SIG_SETMASK:
11402                 how = SIG_SETMASK;
11403                 break;
11404             default:
11405                 return -TARGET_EINVAL;
11406             }
11407             mask = arg2;
11408             target_to_host_old_sigset(&set, &mask);
11409             ret = do_sigprocmask(how, &set, &oldset);
11410             if (!ret) {
11411                 host_to_target_old_sigset(&mask, &oldset);
11412                 ret = mask;
11413             }
11414         }
11415         return ret;
11416 #endif
11417 
11418 #ifdef TARGET_NR_getgid32
11419     case TARGET_NR_getgid32:
11420         return get_errno(getgid());
11421 #endif
11422 #ifdef TARGET_NR_geteuid32
11423     case TARGET_NR_geteuid32:
11424         return get_errno(geteuid());
11425 #endif
11426 #ifdef TARGET_NR_getegid32
11427     case TARGET_NR_getegid32:
11428         return get_errno(getegid());
11429 #endif
11430 #ifdef TARGET_NR_setreuid32
11431     case TARGET_NR_setreuid32:
11432         return get_errno(setreuid(arg1, arg2));
11433 #endif
11434 #ifdef TARGET_NR_setregid32
11435     case TARGET_NR_setregid32:
11436         return get_errno(setregid(arg1, arg2));
11437 #endif
11438 #ifdef TARGET_NR_getgroups32
11439     case TARGET_NR_getgroups32:
11440         {
11441             int gidsetsize = arg1;
11442             uint32_t *target_grouplist;
11443             gid_t *grouplist;
11444             int i;
11445 
11446             grouplist = alloca(gidsetsize * sizeof(gid_t));
11447             ret = get_errno(getgroups(gidsetsize, grouplist));
11448             if (gidsetsize == 0)
11449                 return ret;
11450             if (!is_error(ret)) {
11451                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11452                 if (!target_grouplist) {
11453                     return -TARGET_EFAULT;
11454                 }
11455                 for(i = 0;i < ret; i++)
11456                     target_grouplist[i] = tswap32(grouplist[i]);
11457                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11458             }
11459         }
11460         return ret;
11461 #endif
11462 #ifdef TARGET_NR_setgroups32
11463     case TARGET_NR_setgroups32:
11464         {
11465             int gidsetsize = arg1;
11466             uint32_t *target_grouplist;
11467             gid_t *grouplist;
11468             int i;
11469 
11470             grouplist = alloca(gidsetsize * sizeof(gid_t));
11471             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11472             if (!target_grouplist) {
11473                 return -TARGET_EFAULT;
11474             }
11475             for(i = 0;i < gidsetsize; i++)
11476                 grouplist[i] = tswap32(target_grouplist[i]);
11477             unlock_user(target_grouplist, arg2, 0);
11478             return get_errno(setgroups(gidsetsize, grouplist));
11479         }
11480 #endif
11481 #ifdef TARGET_NR_fchown32
11482     case TARGET_NR_fchown32:
11483         return get_errno(fchown(arg1, arg2, arg3));
11484 #endif
11485 #ifdef TARGET_NR_setresuid32
11486     case TARGET_NR_setresuid32:
11487         return get_errno(sys_setresuid(arg1, arg2, arg3));
11488 #endif
11489 #ifdef TARGET_NR_getresuid32
11490     case TARGET_NR_getresuid32:
11491         {
11492             uid_t ruid, euid, suid;
11493             ret = get_errno(getresuid(&ruid, &euid, &suid));
11494             if (!is_error(ret)) {
11495                 if (put_user_u32(ruid, arg1)
11496                     || put_user_u32(euid, arg2)
11497                     || put_user_u32(suid, arg3))
11498                     return -TARGET_EFAULT;
11499             }
11500         }
11501         return ret;
11502 #endif
11503 #ifdef TARGET_NR_setresgid32
11504     case TARGET_NR_setresgid32:
11505         return get_errno(sys_setresgid(arg1, arg2, arg3));
11506 #endif
11507 #ifdef TARGET_NR_getresgid32
11508     case TARGET_NR_getresgid32:
11509         {
11510             gid_t rgid, egid, sgid;
11511             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11512             if (!is_error(ret)) {
11513                 if (put_user_u32(rgid, arg1)
11514                     || put_user_u32(egid, arg2)
11515                     || put_user_u32(sgid, arg3))
11516                     return -TARGET_EFAULT;
11517             }
11518         }
11519         return ret;
11520 #endif
11521 #ifdef TARGET_NR_chown32
11522     case TARGET_NR_chown32:
11523         if (!(p = lock_user_string(arg1)))
11524             return -TARGET_EFAULT;
11525         ret = get_errno(chown(p, arg2, arg3));
11526         unlock_user(p, arg1, 0);
11527         return ret;
11528 #endif
11529 #ifdef TARGET_NR_setuid32
11530     case TARGET_NR_setuid32:
11531         return get_errno(sys_setuid(arg1));
11532 #endif
11533 #ifdef TARGET_NR_setgid32
11534     case TARGET_NR_setgid32:
11535         return get_errno(sys_setgid(arg1));
11536 #endif
11537 #ifdef TARGET_NR_setfsuid32
11538     case TARGET_NR_setfsuid32:
11539         return get_errno(setfsuid(arg1));
11540 #endif
11541 #ifdef TARGET_NR_setfsgid32
11542     case TARGET_NR_setfsgid32:
11543         return get_errno(setfsgid(arg1));
11544 #endif
11545 #ifdef TARGET_NR_mincore
11546     case TARGET_NR_mincore:
11547         {
11548             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11549             if (!a) {
11550                 return -TARGET_ENOMEM;
11551             }
11552             p = lock_user_string(arg3);
11553             if (!p) {
11554                 ret = -TARGET_EFAULT;
11555             } else {
11556                 ret = get_errno(mincore(a, arg2, p));
11557                 unlock_user(p, arg3, ret);
11558             }
11559             unlock_user(a, arg1, 0);
11560         }
11561         return ret;
11562 #endif
11563 #ifdef TARGET_NR_arm_fadvise64_64
11564     case TARGET_NR_arm_fadvise64_64:
11565         /* arm_fadvise64_64 looks like fadvise64_64 but
11566          * with different argument order: fd, advice, offset, len
11567          * rather than the usual fd, offset, len, advice.
11568          * Note that offset and len are both 64-bit so appear as
11569          * pairs of 32-bit registers.
11570          */
11571         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11572                             target_offset64(arg5, arg6), arg2);
11573         return -host_to_target_errno(ret);
11574 #endif
11575 
11576 #if TARGET_ABI_BITS == 32
11577 
11578 #ifdef TARGET_NR_fadvise64_64
11579     case TARGET_NR_fadvise64_64:
11580 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11581         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11582         ret = arg2;
11583         arg2 = arg3;
11584         arg3 = arg4;
11585         arg4 = arg5;
11586         arg5 = arg6;
11587         arg6 = ret;
11588 #else
11589         /* 6 args: fd, offset (high, low), len (high, low), advice */
11590         if (regpairs_aligned(cpu_env, num)) {
11591             /* offset is in (3,4), len in (5,6) and advice in 7 */
11592             arg2 = arg3;
11593             arg3 = arg4;
11594             arg4 = arg5;
11595             arg5 = arg6;
11596             arg6 = arg7;
11597         }
11598 #endif
11599         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11600                             target_offset64(arg4, arg5), arg6);
11601         return -host_to_target_errno(ret);
11602 #endif
11603 
11604 #ifdef TARGET_NR_fadvise64
11605     case TARGET_NR_fadvise64:
11606         /* 5 args: fd, offset (high, low), len, advice */
11607         if (regpairs_aligned(cpu_env, num)) {
11608             /* offset is in (3,4), len in 5 and advice in 6 */
11609             arg2 = arg3;
11610             arg3 = arg4;
11611             arg4 = arg5;
11612             arg5 = arg6;
11613         }
11614         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11615         return -host_to_target_errno(ret);
11616 #endif
11617 
11618 #else /* not a 32-bit ABI */
11619 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11620 #ifdef TARGET_NR_fadvise64_64
11621     case TARGET_NR_fadvise64_64:
11622 #endif
11623 #ifdef TARGET_NR_fadvise64
11624     case TARGET_NR_fadvise64:
11625 #endif
11626 #ifdef TARGET_S390X
11627         switch (arg4) {
11628         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11629         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11630         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11631         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11632         default: break;
11633         }
11634 #endif
11635         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11636 #endif
11637 #endif /* end of 64-bit ABI fadvise handling */
11638 
11639 #ifdef TARGET_NR_madvise
11640     case TARGET_NR_madvise:
11641         /* A straight passthrough may not be safe because qemu sometimes
11642            turns private file-backed mappings into anonymous mappings.
11643            This will break MADV_DONTNEED.
11644            This is a hint, so ignoring and returning success is ok.  */
11645         return 0;
11646 #endif
11647 #ifdef TARGET_NR_fcntl64
11648     case TARGET_NR_fcntl64:
11649     {
11650         int cmd;
11651         struct flock64 fl;
11652         from_flock64_fn *copyfrom = copy_from_user_flock64;
11653         to_flock64_fn *copyto = copy_to_user_flock64;
11654 
11655 #ifdef TARGET_ARM
11656         if (!((CPUARMState *)cpu_env)->eabi) {
11657             copyfrom = copy_from_user_oabi_flock64;
11658             copyto = copy_to_user_oabi_flock64;
11659         }
11660 #endif
11661 
11662         cmd = target_to_host_fcntl_cmd(arg2);
11663         if (cmd == -TARGET_EINVAL) {
11664             return cmd;
11665         }
11666 
11667         switch(arg2) {
11668         case TARGET_F_GETLK64:
11669             ret = copyfrom(&fl, arg3);
11670             if (ret) {
11671                 break;
11672             }
11673             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11674             if (ret == 0) {
11675                 ret = copyto(arg3, &fl);
11676             }
11677 	    break;
11678 
11679         case TARGET_F_SETLK64:
11680         case TARGET_F_SETLKW64:
11681             ret = copyfrom(&fl, arg3);
11682             if (ret) {
11683                 break;
11684             }
11685             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11686 	    break;
11687         default:
11688             ret = do_fcntl(arg1, arg2, arg3);
11689             break;
11690         }
11691         return ret;
11692     }
11693 #endif
11694 #ifdef TARGET_NR_cacheflush
11695     case TARGET_NR_cacheflush:
11696         /* self-modifying code is handled automatically, so nothing needed */
11697         return 0;
11698 #endif
11699 #ifdef TARGET_NR_getpagesize
11700     case TARGET_NR_getpagesize:
11701         return TARGET_PAGE_SIZE;
11702 #endif
11703     case TARGET_NR_gettid:
11704         return get_errno(sys_gettid());
11705 #ifdef TARGET_NR_readahead
11706     case TARGET_NR_readahead:
11707 #if TARGET_ABI_BITS == 32
11708         if (regpairs_aligned(cpu_env, num)) {
11709             arg2 = arg3;
11710             arg3 = arg4;
11711             arg4 = arg5;
11712         }
11713         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11714 #else
11715         ret = get_errno(readahead(arg1, arg2, arg3));
11716 #endif
11717         return ret;
11718 #endif
11719 #ifdef CONFIG_ATTR
11720 #ifdef TARGET_NR_setxattr
11721     case TARGET_NR_listxattr:
11722     case TARGET_NR_llistxattr:
11723     {
11724         void *p, *b = 0;
11725         if (arg2) {
11726             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11727             if (!b) {
11728                 return -TARGET_EFAULT;
11729             }
11730         }
11731         p = lock_user_string(arg1);
11732         if (p) {
11733             if (num == TARGET_NR_listxattr) {
11734                 ret = get_errno(listxattr(p, b, arg3));
11735             } else {
11736                 ret = get_errno(llistxattr(p, b, arg3));
11737             }
11738         } else {
11739             ret = -TARGET_EFAULT;
11740         }
11741         unlock_user(p, arg1, 0);
11742         unlock_user(b, arg2, arg3);
11743         return ret;
11744     }
11745     case TARGET_NR_flistxattr:
11746     {
11747         void *b = 0;
11748         if (arg2) {
11749             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11750             if (!b) {
11751                 return -TARGET_EFAULT;
11752             }
11753         }
11754         ret = get_errno(flistxattr(arg1, b, arg3));
11755         unlock_user(b, arg2, arg3);
11756         return ret;
11757     }
11758     case TARGET_NR_setxattr:
11759     case TARGET_NR_lsetxattr:
11760         {
11761             void *p, *n, *v = 0;
11762             if (arg3) {
11763                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11764                 if (!v) {
11765                     return -TARGET_EFAULT;
11766                 }
11767             }
11768             p = lock_user_string(arg1);
11769             n = lock_user_string(arg2);
11770             if (p && n) {
11771                 if (num == TARGET_NR_setxattr) {
11772                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11773                 } else {
11774                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11775                 }
11776             } else {
11777                 ret = -TARGET_EFAULT;
11778             }
11779             unlock_user(p, arg1, 0);
11780             unlock_user(n, arg2, 0);
11781             unlock_user(v, arg3, 0);
11782         }
11783         return ret;
11784     case TARGET_NR_fsetxattr:
11785         {
11786             void *n, *v = 0;
11787             if (arg3) {
11788                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11789                 if (!v) {
11790                     return -TARGET_EFAULT;
11791                 }
11792             }
11793             n = lock_user_string(arg2);
11794             if (n) {
11795                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11796             } else {
11797                 ret = -TARGET_EFAULT;
11798             }
11799             unlock_user(n, arg2, 0);
11800             unlock_user(v, arg3, 0);
11801         }
11802         return ret;
11803     case TARGET_NR_getxattr:
11804     case TARGET_NR_lgetxattr:
11805         {
11806             void *p, *n, *v = 0;
11807             if (arg3) {
11808                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11809                 if (!v) {
11810                     return -TARGET_EFAULT;
11811                 }
11812             }
11813             p = lock_user_string(arg1);
11814             n = lock_user_string(arg2);
11815             if (p && n) {
11816                 if (num == TARGET_NR_getxattr) {
11817                     ret = get_errno(getxattr(p, n, v, arg4));
11818                 } else {
11819                     ret = get_errno(lgetxattr(p, n, v, arg4));
11820                 }
11821             } else {
11822                 ret = -TARGET_EFAULT;
11823             }
11824             unlock_user(p, arg1, 0);
11825             unlock_user(n, arg2, 0);
11826             unlock_user(v, arg3, arg4);
11827         }
11828         return ret;
11829     case TARGET_NR_fgetxattr:
11830         {
11831             void *n, *v = 0;
11832             if (arg3) {
11833                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11834                 if (!v) {
11835                     return -TARGET_EFAULT;
11836                 }
11837             }
11838             n = lock_user_string(arg2);
11839             if (n) {
11840                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11841             } else {
11842                 ret = -TARGET_EFAULT;
11843             }
11844             unlock_user(n, arg2, 0);
11845             unlock_user(v, arg3, arg4);
11846         }
11847         return ret;
11848     case TARGET_NR_removexattr:
11849     case TARGET_NR_lremovexattr:
11850         {
11851             void *p, *n;
11852             p = lock_user_string(arg1);
11853             n = lock_user_string(arg2);
11854             if (p && n) {
11855                 if (num == TARGET_NR_removexattr) {
11856                     ret = get_errno(removexattr(p, n));
11857                 } else {
11858                     ret = get_errno(lremovexattr(p, n));
11859                 }
11860             } else {
11861                 ret = -TARGET_EFAULT;
11862             }
11863             unlock_user(p, arg1, 0);
11864             unlock_user(n, arg2, 0);
11865         }
11866         return ret;
11867     case TARGET_NR_fremovexattr:
11868         {
11869             void *n;
11870             n = lock_user_string(arg2);
11871             if (n) {
11872                 ret = get_errno(fremovexattr(arg1, n));
11873             } else {
11874                 ret = -TARGET_EFAULT;
11875             }
11876             unlock_user(n, arg2, 0);
11877         }
11878         return ret;
11879 #endif
11880 #endif /* CONFIG_ATTR */
11881 #ifdef TARGET_NR_set_thread_area
11882     case TARGET_NR_set_thread_area:
11883 #if defined(TARGET_MIPS)
11884       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11885       return 0;
11886 #elif defined(TARGET_CRIS)
11887       if (arg1 & 0xff)
11888           ret = -TARGET_EINVAL;
11889       else {
11890           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11891           ret = 0;
11892       }
11893       return ret;
11894 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11895       return do_set_thread_area(cpu_env, arg1);
11896 #elif defined(TARGET_M68K)
11897       {
11898           TaskState *ts = cpu->opaque;
11899           ts->tp_value = arg1;
11900           return 0;
11901       }
11902 #else
11903       return -TARGET_ENOSYS;
11904 #endif
11905 #endif
11906 #ifdef TARGET_NR_get_thread_area
11907     case TARGET_NR_get_thread_area:
11908 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11909         return do_get_thread_area(cpu_env, arg1);
11910 #elif defined(TARGET_M68K)
11911         {
11912             TaskState *ts = cpu->opaque;
11913             return ts->tp_value;
11914         }
11915 #else
11916         return -TARGET_ENOSYS;
11917 #endif
11918 #endif
11919 #ifdef TARGET_NR_getdomainname
11920     case TARGET_NR_getdomainname:
11921         return -TARGET_ENOSYS;
11922 #endif
11923 
11924 #ifdef TARGET_NR_clock_settime
11925     case TARGET_NR_clock_settime:
11926     {
11927         struct timespec ts;
11928 
11929         ret = target_to_host_timespec(&ts, arg2);
11930         if (!is_error(ret)) {
11931             ret = get_errno(clock_settime(arg1, &ts));
11932         }
11933         return ret;
11934     }
11935 #endif
11936 #ifdef TARGET_NR_clock_settime64
11937     case TARGET_NR_clock_settime64:
11938     {
11939         struct timespec ts;
11940 
11941         ret = target_to_host_timespec64(&ts, arg2);
11942         if (!is_error(ret)) {
11943             ret = get_errno(clock_settime(arg1, &ts));
11944         }
11945         return ret;
11946     }
11947 #endif
11948 #ifdef TARGET_NR_clock_gettime
11949     case TARGET_NR_clock_gettime:
11950     {
11951         struct timespec ts;
11952         ret = get_errno(clock_gettime(arg1, &ts));
11953         if (!is_error(ret)) {
11954             ret = host_to_target_timespec(arg2, &ts);
11955         }
11956         return ret;
11957     }
11958 #endif
11959 #ifdef TARGET_NR_clock_gettime64
11960     case TARGET_NR_clock_gettime64:
11961     {
11962         struct timespec ts;
11963         ret = get_errno(clock_gettime(arg1, &ts));
11964         if (!is_error(ret)) {
11965             ret = host_to_target_timespec64(arg2, &ts);
11966         }
11967         return ret;
11968     }
11969 #endif
11970 #ifdef TARGET_NR_clock_getres
11971     case TARGET_NR_clock_getres:
11972     {
11973         struct timespec ts;
11974         ret = get_errno(clock_getres(arg1, &ts));
11975         if (!is_error(ret)) {
11976             host_to_target_timespec(arg2, &ts);
11977         }
11978         return ret;
11979     }
11980 #endif
11981 #ifdef TARGET_NR_clock_getres_time64
11982     case TARGET_NR_clock_getres_time64:
11983     {
11984         struct timespec ts;
11985         ret = get_errno(clock_getres(arg1, &ts));
11986         if (!is_error(ret)) {
11987             host_to_target_timespec64(arg2, &ts);
11988         }
11989         return ret;
11990     }
11991 #endif
11992 #ifdef TARGET_NR_clock_nanosleep
11993     case TARGET_NR_clock_nanosleep:
11994     {
11995         struct timespec ts;
11996         if (target_to_host_timespec(&ts, arg3)) {
11997             return -TARGET_EFAULT;
11998         }
11999         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12000                                              &ts, arg4 ? &ts : NULL));
12001         /*
12002          * if the call is interrupted by a signal handler, it fails
12003          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12004          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12005          */
12006         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12007             host_to_target_timespec(arg4, &ts)) {
12008               return -TARGET_EFAULT;
12009         }
12010 
12011         return ret;
12012     }
12013 #endif
12014 #ifdef TARGET_NR_clock_nanosleep_time64
12015     case TARGET_NR_clock_nanosleep_time64:
12016     {
12017         struct timespec ts;
12018 
12019         if (target_to_host_timespec64(&ts, arg3)) {
12020             return -TARGET_EFAULT;
12021         }
12022 
12023         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12024                                              &ts, arg4 ? &ts : NULL));
12025 
12026         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12027             host_to_target_timespec64(arg4, &ts)) {
12028             return -TARGET_EFAULT;
12029         }
12030         return ret;
12031     }
12032 #endif
12033 
12034 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12035     case TARGET_NR_set_tid_address:
12036         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12037 #endif
12038 
12039     case TARGET_NR_tkill:
12040         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12041 
12042     case TARGET_NR_tgkill:
12043         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12044                          target_to_host_signal(arg3)));
12045 
12046 #ifdef TARGET_NR_set_robust_list
12047     case TARGET_NR_set_robust_list:
12048     case TARGET_NR_get_robust_list:
12049         /* The ABI for supporting robust futexes has userspace pass
12050          * the kernel a pointer to a linked list which is updated by
12051          * userspace after the syscall; the list is walked by the kernel
12052          * when the thread exits. Since the linked list in QEMU guest
12053          * memory isn't a valid linked list for the host and we have
12054          * no way to reliably intercept the thread-death event, we can't
12055          * support these. Silently return ENOSYS so that guest userspace
12056          * falls back to a non-robust futex implementation (which should
12057          * be OK except in the corner case of the guest crashing while
12058          * holding a mutex that is shared with another process via
12059          * shared memory).
12060          */
12061         return -TARGET_ENOSYS;
12062 #endif
12063 
12064 #if defined(TARGET_NR_utimensat)
12065     case TARGET_NR_utimensat:
12066         {
12067             struct timespec *tsp, ts[2];
12068             if (!arg3) {
12069                 tsp = NULL;
12070             } else {
12071                 if (target_to_host_timespec(ts, arg3)) {
12072                     return -TARGET_EFAULT;
12073                 }
12074                 if (target_to_host_timespec(ts + 1, arg3 +
12075                                             sizeof(struct target_timespec))) {
12076                     return -TARGET_EFAULT;
12077                 }
12078                 tsp = ts;
12079             }
12080             if (!arg2)
12081                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12082             else {
12083                 if (!(p = lock_user_string(arg2))) {
12084                     return -TARGET_EFAULT;
12085                 }
12086                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12087                 unlock_user(p, arg2, 0);
12088             }
12089         }
12090         return ret;
12091 #endif
12092 #ifdef TARGET_NR_utimensat_time64
12093     case TARGET_NR_utimensat_time64:
12094         {
12095             struct timespec *tsp, ts[2];
12096             if (!arg3) {
12097                 tsp = NULL;
12098             } else {
12099                 if (target_to_host_timespec64(ts, arg3)) {
12100                     return -TARGET_EFAULT;
12101                 }
12102                 if (target_to_host_timespec64(ts + 1, arg3 +
12103                                      sizeof(struct target__kernel_timespec))) {
12104                     return -TARGET_EFAULT;
12105                 }
12106                 tsp = ts;
12107             }
12108             if (!arg2)
12109                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12110             else {
12111                 p = lock_user_string(arg2);
12112                 if (!p) {
12113                     return -TARGET_EFAULT;
12114                 }
12115                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12116                 unlock_user(p, arg2, 0);
12117             }
12118         }
12119         return ret;
12120 #endif
12121 #ifdef TARGET_NR_futex
12122     case TARGET_NR_futex:
12123         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12124 #endif
12125 #ifdef TARGET_NR_futex_time64
12126     case TARGET_NR_futex_time64:
12127         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12128 #endif
12129 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12130     case TARGET_NR_inotify_init:
12131         ret = get_errno(sys_inotify_init());
12132         if (ret >= 0) {
12133             fd_trans_register(ret, &target_inotify_trans);
12134         }
12135         return ret;
12136 #endif
12137 #ifdef CONFIG_INOTIFY1
12138 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12139     case TARGET_NR_inotify_init1:
12140         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12141                                           fcntl_flags_tbl)));
12142         if (ret >= 0) {
12143             fd_trans_register(ret, &target_inotify_trans);
12144         }
12145         return ret;
12146 #endif
12147 #endif
12148 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12149     case TARGET_NR_inotify_add_watch:
12150         p = lock_user_string(arg2);
12151         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12152         unlock_user(p, arg2, 0);
12153         return ret;
12154 #endif
12155 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12156     case TARGET_NR_inotify_rm_watch:
12157         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12158 #endif
12159 
12160 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12161     case TARGET_NR_mq_open:
12162         {
12163             struct mq_attr posix_mq_attr;
12164             struct mq_attr *pposix_mq_attr;
12165             int host_flags;
12166 
12167             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12168             pposix_mq_attr = NULL;
12169             if (arg4) {
12170                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12171                     return -TARGET_EFAULT;
12172                 }
12173                 pposix_mq_attr = &posix_mq_attr;
12174             }
12175             p = lock_user_string(arg1 - 1);
12176             if (!p) {
12177                 return -TARGET_EFAULT;
12178             }
12179             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12180             unlock_user (p, arg1, 0);
12181         }
12182         return ret;
12183 
12184     case TARGET_NR_mq_unlink:
12185         p = lock_user_string(arg1 - 1);
12186         if (!p) {
12187             return -TARGET_EFAULT;
12188         }
12189         ret = get_errno(mq_unlink(p));
12190         unlock_user (p, arg1, 0);
12191         return ret;
12192 
12193 #ifdef TARGET_NR_mq_timedsend
12194     case TARGET_NR_mq_timedsend:
12195         {
12196             struct timespec ts;
12197 
12198             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12199             if (arg5 != 0) {
12200                 if (target_to_host_timespec(&ts, arg5)) {
12201                     return -TARGET_EFAULT;
12202                 }
12203                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12204                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12205                     return -TARGET_EFAULT;
12206                 }
12207             } else {
12208                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12209             }
12210             unlock_user (p, arg2, arg3);
12211         }
12212         return ret;
12213 #endif
12214 #ifdef TARGET_NR_mq_timedsend_time64
12215     case TARGET_NR_mq_timedsend_time64:
12216         {
12217             struct timespec ts;
12218 
12219             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12220             if (arg5 != 0) {
12221                 if (target_to_host_timespec64(&ts, arg5)) {
12222                     return -TARGET_EFAULT;
12223                 }
12224                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12225                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12226                     return -TARGET_EFAULT;
12227                 }
12228             } else {
12229                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12230             }
12231             unlock_user(p, arg2, arg3);
12232         }
12233         return ret;
12234 #endif
12235 
12236 #ifdef TARGET_NR_mq_timedreceive
12237     case TARGET_NR_mq_timedreceive:
12238         {
12239             struct timespec ts;
12240             unsigned int prio;
12241 
12242             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12243             if (arg5 != 0) {
12244                 if (target_to_host_timespec(&ts, arg5)) {
12245                     return -TARGET_EFAULT;
12246                 }
12247                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12248                                                      &prio, &ts));
12249                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12250                     return -TARGET_EFAULT;
12251                 }
12252             } else {
12253                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12254                                                      &prio, NULL));
12255             }
12256             unlock_user (p, arg2, arg3);
12257             if (arg4 != 0)
12258                 put_user_u32(prio, arg4);
12259         }
12260         return ret;
12261 #endif
12262 #ifdef TARGET_NR_mq_timedreceive_time64
12263     case TARGET_NR_mq_timedreceive_time64:
12264         {
12265             struct timespec ts;
12266             unsigned int prio;
12267 
12268             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12269             if (arg5 != 0) {
12270                 if (target_to_host_timespec64(&ts, arg5)) {
12271                     return -TARGET_EFAULT;
12272                 }
12273                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12274                                                      &prio, &ts));
12275                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12276                     return -TARGET_EFAULT;
12277                 }
12278             } else {
12279                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12280                                                      &prio, NULL));
12281             }
12282             unlock_user(p, arg2, arg3);
12283             if (arg4 != 0) {
12284                 put_user_u32(prio, arg4);
12285             }
12286         }
12287         return ret;
12288 #endif
12289 
12290     /* Not implemented for now... */
12291 /*     case TARGET_NR_mq_notify: */
12292 /*         break; */
12293 
12294     case TARGET_NR_mq_getsetattr:
12295         {
12296             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12297             ret = 0;
12298             if (arg2 != 0) {
12299                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12300                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12301                                            &posix_mq_attr_out));
12302             } else if (arg3 != 0) {
12303                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12304             }
12305             if (ret == 0 && arg3 != 0) {
12306                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12307             }
12308         }
12309         return ret;
12310 #endif
12311 
12312 #ifdef CONFIG_SPLICE
12313 #ifdef TARGET_NR_tee
12314     case TARGET_NR_tee:
12315         {
12316             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12317         }
12318         return ret;
12319 #endif
12320 #ifdef TARGET_NR_splice
12321     case TARGET_NR_splice:
12322         {
12323             loff_t loff_in, loff_out;
12324             loff_t *ploff_in = NULL, *ploff_out = NULL;
12325             if (arg2) {
12326                 if (get_user_u64(loff_in, arg2)) {
12327                     return -TARGET_EFAULT;
12328                 }
12329                 ploff_in = &loff_in;
12330             }
12331             if (arg4) {
12332                 if (get_user_u64(loff_out, arg4)) {
12333                     return -TARGET_EFAULT;
12334                 }
12335                 ploff_out = &loff_out;
12336             }
12337             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12338             if (arg2) {
12339                 if (put_user_u64(loff_in, arg2)) {
12340                     return -TARGET_EFAULT;
12341                 }
12342             }
12343             if (arg4) {
12344                 if (put_user_u64(loff_out, arg4)) {
12345                     return -TARGET_EFAULT;
12346                 }
12347             }
12348         }
12349         return ret;
12350 #endif
12351 #ifdef TARGET_NR_vmsplice
12352 	case TARGET_NR_vmsplice:
12353         {
12354             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12355             if (vec != NULL) {
12356                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12357                 unlock_iovec(vec, arg2, arg3, 0);
12358             } else {
12359                 ret = -host_to_target_errno(errno);
12360             }
12361         }
12362         return ret;
12363 #endif
12364 #endif /* CONFIG_SPLICE */
12365 #ifdef CONFIG_EVENTFD
12366 #if defined(TARGET_NR_eventfd)
12367     case TARGET_NR_eventfd:
12368         ret = get_errno(eventfd(arg1, 0));
12369         if (ret >= 0) {
12370             fd_trans_register(ret, &target_eventfd_trans);
12371         }
12372         return ret;
12373 #endif
12374 #if defined(TARGET_NR_eventfd2)
12375     case TARGET_NR_eventfd2:
12376     {
12377         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12378         if (arg2 & TARGET_O_NONBLOCK) {
12379             host_flags |= O_NONBLOCK;
12380         }
12381         if (arg2 & TARGET_O_CLOEXEC) {
12382             host_flags |= O_CLOEXEC;
12383         }
12384         ret = get_errno(eventfd(arg1, host_flags));
12385         if (ret >= 0) {
12386             fd_trans_register(ret, &target_eventfd_trans);
12387         }
12388         return ret;
12389     }
12390 #endif
12391 #endif /* CONFIG_EVENTFD  */
12392 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12393     case TARGET_NR_fallocate:
12394 #if TARGET_ABI_BITS == 32
12395         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12396                                   target_offset64(arg5, arg6)));
12397 #else
12398         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12399 #endif
12400         return ret;
12401 #endif
12402 #if defined(CONFIG_SYNC_FILE_RANGE)
12403 #if defined(TARGET_NR_sync_file_range)
12404     case TARGET_NR_sync_file_range:
12405 #if TARGET_ABI_BITS == 32
12406 #if defined(TARGET_MIPS)
12407         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12408                                         target_offset64(arg5, arg6), arg7));
12409 #else
12410         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12411                                         target_offset64(arg4, arg5), arg6));
12412 #endif /* !TARGET_MIPS */
12413 #else
12414         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12415 #endif
12416         return ret;
12417 #endif
12418 #if defined(TARGET_NR_sync_file_range2) || \
12419     defined(TARGET_NR_arm_sync_file_range)
12420 #if defined(TARGET_NR_sync_file_range2)
12421     case TARGET_NR_sync_file_range2:
12422 #endif
12423 #if defined(TARGET_NR_arm_sync_file_range)
12424     case TARGET_NR_arm_sync_file_range:
12425 #endif
12426         /* This is like sync_file_range but the arguments are reordered */
12427 #if TARGET_ABI_BITS == 32
12428         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12429                                         target_offset64(arg5, arg6), arg2));
12430 #else
12431         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12432 #endif
12433         return ret;
12434 #endif
12435 #endif
12436 #if defined(TARGET_NR_signalfd4)
12437     case TARGET_NR_signalfd4:
12438         return do_signalfd4(arg1, arg2, arg4);
12439 #endif
12440 #if defined(TARGET_NR_signalfd)
12441     case TARGET_NR_signalfd:
12442         return do_signalfd4(arg1, arg2, 0);
12443 #endif
12444 #if defined(CONFIG_EPOLL)
12445 #if defined(TARGET_NR_epoll_create)
12446     case TARGET_NR_epoll_create:
12447         return get_errno(epoll_create(arg1));
12448 #endif
12449 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12450     case TARGET_NR_epoll_create1:
12451         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12452 #endif
12453 #if defined(TARGET_NR_epoll_ctl)
12454     case TARGET_NR_epoll_ctl:
12455     {
12456         struct epoll_event ep;
12457         struct epoll_event *epp = 0;
12458         if (arg4) {
12459             if (arg2 != EPOLL_CTL_DEL) {
12460                 struct target_epoll_event *target_ep;
12461                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12462                     return -TARGET_EFAULT;
12463                 }
12464                 ep.events = tswap32(target_ep->events);
12465                 /*
12466                  * The epoll_data_t union is just opaque data to the kernel,
12467                  * so we transfer all 64 bits across and need not worry what
12468                  * actual data type it is.
12469                  */
12470                 ep.data.u64 = tswap64(target_ep->data.u64);
12471                 unlock_user_struct(target_ep, arg4, 0);
12472             }
12473             /*
12474              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
12475              * non-null pointer, even though this argument is ignored.
12476              *
12477              */
12478             epp = &ep;
12479         }
12480         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12481     }
12482 #endif
12483 
12484 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12485 #if defined(TARGET_NR_epoll_wait)
12486     case TARGET_NR_epoll_wait:
12487 #endif
12488 #if defined(TARGET_NR_epoll_pwait)
12489     case TARGET_NR_epoll_pwait:
12490 #endif
12491     {
12492         struct target_epoll_event *target_ep;
12493         struct epoll_event *ep;
12494         int epfd = arg1;
12495         int maxevents = arg3;
12496         int timeout = arg4;
12497 
12498         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12499             return -TARGET_EINVAL;
12500         }
12501 
12502         target_ep = lock_user(VERIFY_WRITE, arg2,
12503                               maxevents * sizeof(struct target_epoll_event), 1);
12504         if (!target_ep) {
12505             return -TARGET_EFAULT;
12506         }
12507 
12508         ep = g_try_new(struct epoll_event, maxevents);
12509         if (!ep) {
12510             unlock_user(target_ep, arg2, 0);
12511             return -TARGET_ENOMEM;
12512         }
12513 
12514         switch (num) {
12515 #if defined(TARGET_NR_epoll_pwait)
12516         case TARGET_NR_epoll_pwait:
12517         {
12518             target_sigset_t *target_set;
12519             sigset_t _set, *set = &_set;
12520 
12521             if (arg5) {
12522                 if (arg6 != sizeof(target_sigset_t)) {
12523                     ret = -TARGET_EINVAL;
12524                     break;
12525                 }
12526 
12527                 target_set = lock_user(VERIFY_READ, arg5,
12528                                        sizeof(target_sigset_t), 1);
12529                 if (!target_set) {
12530                     ret = -TARGET_EFAULT;
12531                     break;
12532                 }
12533                 target_to_host_sigset(set, target_set);
12534                 unlock_user(target_set, arg5, 0);
12535             } else {
12536                 set = NULL;
12537             }
12538 
12539             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12540                                              set, SIGSET_T_SIZE));
12541             break;
12542         }
12543 #endif
12544 #if defined(TARGET_NR_epoll_wait)
12545         case TARGET_NR_epoll_wait:
12546             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12547                                              NULL, 0));
12548             break;
12549 #endif
12550         default:
12551             ret = -TARGET_ENOSYS;
12552         }
12553         if (!is_error(ret)) {
12554             int i;
12555             for (i = 0; i < ret; i++) {
12556                 target_ep[i].events = tswap32(ep[i].events);
12557                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12558             }
12559             unlock_user(target_ep, arg2,
12560                         ret * sizeof(struct target_epoll_event));
12561         } else {
12562             unlock_user(target_ep, arg2, 0);
12563         }
12564         g_free(ep);
12565         return ret;
12566     }
12567 #endif
12568 #endif
12569 #ifdef TARGET_NR_prlimit64
12570     case TARGET_NR_prlimit64:
12571     {
12572         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12573         struct target_rlimit64 *target_rnew, *target_rold;
12574         struct host_rlimit64 rnew, rold, *rnewp = 0;
12575         int resource = target_to_host_resource(arg2);
12576 
12577         if (arg3 && (resource != RLIMIT_AS &&
12578                      resource != RLIMIT_DATA &&
12579                      resource != RLIMIT_STACK)) {
12580             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12581                 return -TARGET_EFAULT;
12582             }
12583             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12584             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12585             unlock_user_struct(target_rnew, arg3, 0);
12586             rnewp = &rnew;
12587         }
12588 
12589         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12590         if (!is_error(ret) && arg4) {
12591             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12592                 return -TARGET_EFAULT;
12593             }
12594             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12595             target_rold->rlim_max = tswap64(rold.rlim_max);
12596             unlock_user_struct(target_rold, arg4, 1);
12597         }
12598         return ret;
12599     }
12600 #endif
12601 #ifdef TARGET_NR_gethostname
12602     case TARGET_NR_gethostname:
12603     {
12604         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12605         if (name) {
12606             ret = get_errno(gethostname(name, arg2));
12607             unlock_user(name, arg1, arg2);
12608         } else {
12609             ret = -TARGET_EFAULT;
12610         }
12611         return ret;
12612     }
12613 #endif
12614 #ifdef TARGET_NR_atomic_cmpxchg_32
12615     case TARGET_NR_atomic_cmpxchg_32:
12616     {
12617         /* should use start_exclusive from main.c */
12618         abi_ulong mem_value;
12619         if (get_user_u32(mem_value, arg6)) {
12620             target_siginfo_t info;
12621             info.si_signo = SIGSEGV;
12622             info.si_errno = 0;
12623             info.si_code = TARGET_SEGV_MAPERR;
12624             info._sifields._sigfault._addr = arg6;
12625             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12626                          QEMU_SI_FAULT, &info);
12627             ret = 0xdeadbeef;
12628 
12629         }
12630         if (mem_value == arg2)
12631             put_user_u32(arg1, arg6);
12632         return mem_value;
12633     }
12634 #endif
12635 #ifdef TARGET_NR_atomic_barrier
12636     case TARGET_NR_atomic_barrier:
12637         /* Like the kernel implementation and the
12638            qemu arm barrier, no-op this? */
12639         return 0;
12640 #endif
12641 
12642 #ifdef TARGET_NR_timer_create
12643     case TARGET_NR_timer_create:
12644     {
12645         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12646 
12647         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12648 
12649         int clkid = arg1;
12650         int timer_index = next_free_host_timer();
12651 
12652         if (timer_index < 0) {
12653             ret = -TARGET_EAGAIN;
12654         } else {
12655             timer_t *phtimer = g_posix_timers  + timer_index;
12656 
12657             if (arg2) {
12658                 phost_sevp = &host_sevp;
12659                 ret = target_to_host_sigevent(phost_sevp, arg2);
12660                 if (ret != 0) {
12661                     return ret;
12662                 }
12663             }
12664 
12665             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12666             if (ret) {
12667                 phtimer = NULL;
12668             } else {
12669                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12670                     return -TARGET_EFAULT;
12671                 }
12672             }
12673         }
12674         return ret;
12675     }
12676 #endif
12677 
12678 #ifdef TARGET_NR_timer_settime
12679     case TARGET_NR_timer_settime:
12680     {
12681         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12682          * struct itimerspec * old_value */
12683         target_timer_t timerid = get_timer_id(arg1);
12684 
12685         if (timerid < 0) {
12686             ret = timerid;
12687         } else if (arg3 == 0) {
12688             ret = -TARGET_EINVAL;
12689         } else {
12690             timer_t htimer = g_posix_timers[timerid];
12691             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12692 
12693             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12694                 return -TARGET_EFAULT;
12695             }
12696             ret = get_errno(
12697                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12698             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12699                 return -TARGET_EFAULT;
12700             }
12701         }
12702         return ret;
12703     }
12704 #endif
12705 
12706 #ifdef TARGET_NR_timer_settime64
12707     case TARGET_NR_timer_settime64:
12708     {
12709         target_timer_t timerid = get_timer_id(arg1);
12710 
12711         if (timerid < 0) {
12712             ret = timerid;
12713         } else if (arg3 == 0) {
12714             ret = -TARGET_EINVAL;
12715         } else {
12716             timer_t htimer = g_posix_timers[timerid];
12717             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12718 
12719             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12720                 return -TARGET_EFAULT;
12721             }
12722             ret = get_errno(
12723                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12724             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12725                 return -TARGET_EFAULT;
12726             }
12727         }
12728         return ret;
12729     }
12730 #endif
12731 
12732 #ifdef TARGET_NR_timer_gettime
12733     case TARGET_NR_timer_gettime:
12734     {
12735         /* args: timer_t timerid, struct itimerspec *curr_value */
12736         target_timer_t timerid = get_timer_id(arg1);
12737 
12738         if (timerid < 0) {
12739             ret = timerid;
12740         } else if (!arg2) {
12741             ret = -TARGET_EFAULT;
12742         } else {
12743             timer_t htimer = g_posix_timers[timerid];
12744             struct itimerspec hspec;
12745             ret = get_errno(timer_gettime(htimer, &hspec));
12746 
12747             if (host_to_target_itimerspec(arg2, &hspec)) {
12748                 ret = -TARGET_EFAULT;
12749             }
12750         }
12751         return ret;
12752     }
12753 #endif
12754 
12755 #ifdef TARGET_NR_timer_gettime64
12756     case TARGET_NR_timer_gettime64:
12757     {
12758         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12759         target_timer_t timerid = get_timer_id(arg1);
12760 
12761         if (timerid < 0) {
12762             ret = timerid;
12763         } else if (!arg2) {
12764             ret = -TARGET_EFAULT;
12765         } else {
12766             timer_t htimer = g_posix_timers[timerid];
12767             struct itimerspec hspec;
12768             ret = get_errno(timer_gettime(htimer, &hspec));
12769 
12770             if (host_to_target_itimerspec64(arg2, &hspec)) {
12771                 ret = -TARGET_EFAULT;
12772             }
12773         }
12774         return ret;
12775     }
12776 #endif
12777 
12778 #ifdef TARGET_NR_timer_getoverrun
12779     case TARGET_NR_timer_getoverrun:
12780     {
12781         /* args: timer_t timerid */
12782         target_timer_t timerid = get_timer_id(arg1);
12783 
12784         if (timerid < 0) {
12785             ret = timerid;
12786         } else {
12787             timer_t htimer = g_posix_timers[timerid];
12788             ret = get_errno(timer_getoverrun(htimer));
12789         }
12790         return ret;
12791     }
12792 #endif
12793 
12794 #ifdef TARGET_NR_timer_delete
12795     case TARGET_NR_timer_delete:
12796     {
12797         /* args: timer_t timerid */
12798         target_timer_t timerid = get_timer_id(arg1);
12799 
12800         if (timerid < 0) {
12801             ret = timerid;
12802         } else {
12803             timer_t htimer = g_posix_timers[timerid];
12804             ret = get_errno(timer_delete(htimer));
12805             g_posix_timers[timerid] = 0;
12806         }
12807         return ret;
12808     }
12809 #endif
12810 
12811 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12812     case TARGET_NR_timerfd_create:
12813         return get_errno(timerfd_create(arg1,
12814                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12815 #endif
12816 
12817 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12818     case TARGET_NR_timerfd_gettime:
12819         {
12820             struct itimerspec its_curr;
12821 
12822             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12823 
12824             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12825                 return -TARGET_EFAULT;
12826             }
12827         }
12828         return ret;
12829 #endif
12830 
12831 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12832     case TARGET_NR_timerfd_gettime64:
12833         {
12834             struct itimerspec its_curr;
12835 
12836             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12837 
12838             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12839                 return -TARGET_EFAULT;
12840             }
12841         }
12842         return ret;
12843 #endif
12844 
12845 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12846     case TARGET_NR_timerfd_settime:
12847         {
12848             struct itimerspec its_new, its_old, *p_new;
12849 
12850             if (arg3) {
12851                 if (target_to_host_itimerspec(&its_new, arg3)) {
12852                     return -TARGET_EFAULT;
12853                 }
12854                 p_new = &its_new;
12855             } else {
12856                 p_new = NULL;
12857             }
12858 
12859             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12860 
12861             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12862                 return -TARGET_EFAULT;
12863             }
12864         }
12865         return ret;
12866 #endif
12867 
12868 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12869     case TARGET_NR_timerfd_settime64:
12870         {
12871             struct itimerspec its_new, its_old, *p_new;
12872 
12873             if (arg3) {
12874                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12875                     return -TARGET_EFAULT;
12876                 }
12877                 p_new = &its_new;
12878             } else {
12879                 p_new = NULL;
12880             }
12881 
12882             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12883 
12884             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12885                 return -TARGET_EFAULT;
12886             }
12887         }
12888         return ret;
12889 #endif
12890 
12891 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12892     case TARGET_NR_ioprio_get:
12893         return get_errno(ioprio_get(arg1, arg2));
12894 #endif
12895 
12896 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12897     case TARGET_NR_ioprio_set:
12898         return get_errno(ioprio_set(arg1, arg2, arg3));
12899 #endif
12900 
12901 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12902     case TARGET_NR_setns:
12903         return get_errno(setns(arg1, arg2));
12904 #endif
12905 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12906     case TARGET_NR_unshare:
12907         return get_errno(unshare(arg1));
12908 #endif
12909 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12910     case TARGET_NR_kcmp:
12911         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12912 #endif
12913 #ifdef TARGET_NR_swapcontext
12914     case TARGET_NR_swapcontext:
12915         /* PowerPC specific.  */
12916         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12917 #endif
12918 #ifdef TARGET_NR_memfd_create
12919     case TARGET_NR_memfd_create:
12920         p = lock_user_string(arg1);
12921         if (!p) {
12922             return -TARGET_EFAULT;
12923         }
12924         ret = get_errno(memfd_create(p, arg2));
12925         fd_trans_unregister(ret);
12926         unlock_user(p, arg1, 0);
12927         return ret;
12928 #endif
12929 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12930     case TARGET_NR_membarrier:
12931         return get_errno(membarrier(arg1, arg2));
12932 #endif
12933 
12934 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
12935     case TARGET_NR_copy_file_range:
12936         {
12937             loff_t inoff, outoff;
12938             loff_t *pinoff = NULL, *poutoff = NULL;
12939 
12940             if (arg2) {
12941                 if (get_user_u64(inoff, arg2)) {
12942                     return -TARGET_EFAULT;
12943                 }
12944                 pinoff = &inoff;
12945             }
12946             if (arg4) {
12947                 if (get_user_u64(outoff, arg4)) {
12948                     return -TARGET_EFAULT;
12949                 }
12950                 poutoff = &outoff;
12951             }
12952             /* Do not sign-extend the count parameter. */
12953             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
12954                                                  (abi_ulong)arg5, arg6));
12955             if (!is_error(ret) && ret > 0) {
12956                 if (arg2) {
12957                     if (put_user_u64(inoff, arg2)) {
12958                         return -TARGET_EFAULT;
12959                     }
12960                 }
12961                 if (arg4) {
12962                     if (put_user_u64(outoff, arg4)) {
12963                         return -TARGET_EFAULT;
12964                     }
12965                 }
12966             }
12967         }
12968         return ret;
12969 #endif
12970 
12971 #if defined(TARGET_NR_pivot_root)
12972     case TARGET_NR_pivot_root:
12973         {
12974             void *p2;
12975             p = lock_user_string(arg1); /* new_root */
12976             p2 = lock_user_string(arg2); /* put_old */
12977             if (!p || !p2) {
12978                 ret = -TARGET_EFAULT;
12979             } else {
12980                 ret = get_errno(pivot_root(p, p2));
12981             }
12982             unlock_user(p2, arg2, 0);
12983             unlock_user(p, arg1, 0);
12984         }
12985         return ret;
12986 #endif
12987 
12988     default:
12989         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
12990         return -TARGET_ENOSYS;
12991     }
12992     return ret;
12993 }
12994 
12995 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
12996                     abi_long arg2, abi_long arg3, abi_long arg4,
12997                     abi_long arg5, abi_long arg6, abi_long arg7,
12998                     abi_long arg8)
12999 {
13000     CPUState *cpu = env_cpu(cpu_env);
13001     abi_long ret;
13002 
13003 #ifdef DEBUG_ERESTARTSYS
13004     /* Debug-only code for exercising the syscall-restart code paths
13005      * in the per-architecture cpu main loops: restart every syscall
13006      * the guest makes once before letting it through.
13007      */
13008     {
13009         static bool flag;
13010         flag = !flag;
13011         if (flag) {
13012             return -QEMU_ERESTARTSYS;
13013         }
13014     }
13015 #endif
13016 
13017     record_syscall_start(cpu, num, arg1,
13018                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13019 
13020     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13021         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13022     }
13023 
13024     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13025                       arg5, arg6, arg7, arg8);
13026 
13027     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13028         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13029                           arg3, arg4, arg5, arg6);
13030     }
13031 
13032     record_syscall_return(cpu, num, ret);
13033     return ret;
13034 }
13035