xref: /openbmc/qemu/linux-user/syscall.c (revision 220717a6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
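/*
 * Editor's note (illustrative, not part of the original source): a typical
 * glibc pthread_create() issues clone() with a mask like
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 *
 * which contains every bit of CLONE_THREAD_FLAGS and otherwise only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it passes the thread-mask check.  A
 * fork()-style clone typically carries only an exit signal in CSIGNAL (e.g.
 * SIGCHLD) plus at most CLONE_OPTIONAL_FORK_FLAGS bits, so it passes the fork
 * check.  Anything else is rejected by the clone emulation later in the file.
 */
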
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
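/*
 * Editor's note (illustrative): these macros generate thin wrappers around
 * the raw host syscall() entry point.  For instance the later declaration
 * "_syscall2(int, sys_getcwd1, char *, buf, size_t, size)" expands to roughly
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * with __NR_sys_getcwd1 #defined to the host __NR_getcwd below, so the call
 * goes straight to the host kernel rather than through the libc wrapper.
 */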
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_inotify_init __NR_inotify_init
276 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
277 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
278 #define __NR_sys_statx __NR_statx
279 
280 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
281 #define __NR__llseek __NR_lseek
282 #endif
283 
284 /* Newer kernel ports have llseek() instead of _llseek() */
285 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
286 #define TARGET_NR__llseek TARGET_NR_llseek
287 #endif
288 
289 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
290 #ifndef TARGET_O_NONBLOCK_MASK
291 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
292 #endif
293 
294 #define __NR_sys_gettid __NR_gettid
295 _syscall0(int, sys_gettid)
296 
297 /* For the 64-bit guest on 32-bit host case we must emulate
298  * getdents using getdents64, because otherwise the host
299  * might hand us back more dirent records than we can fit
300  * into the guest buffer after structure format conversion.
301  * Otherwise we implement guest getdents via the host's own getdents, if available.
302  */
303 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
304 #define EMULATE_GETDENTS_WITH_GETDENTS
305 #endif
306 
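/*
 * Editor's note (illustrative): the sizing hazard described above is about
 * field widths.  A 32-bit host's struct linux_dirent starts with 32-bit
 * d_ino/d_off fields, while a 64-bit guest expects 64-bit fields there, so
 * each record grows by roughly 8 bytes during conversion.  If the host packs,
 * say, 40 short records into a 1024-byte guest buffer, conversion adds about
 * 40 * 8 = 320 bytes and the result no longer fits.  Host getdents64 records
 * already carry 64-bit d_ino/d_off, so converting them into the 64-bit guest
 * layout stays roughly size-for-size and avoids the overflow.
 */
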
307 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
308 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
309 #endif
310 #if (defined(TARGET_NR_getdents) && \
311       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
312     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
313 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
314 #endif
315 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
316 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
317           loff_t *, res, uint, wh);
318 #endif
319 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
320 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
321           siginfo_t *, uinfo)
322 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
323 #ifdef __NR_exit_group
324 _syscall1(int,exit_group,int,error_code)
325 #endif
326 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
327 _syscall1(int,set_tid_address,int *,tidptr)
328 #endif
329 #if defined(__NR_futex)
330 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #if defined(__NR_futex_time64)
334 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
335           const struct timespec *,timeout,int *,uaddr2,int,val3)
336 #endif
337 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
338 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
341 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
342           unsigned long *, user_mask_ptr);
343 #define __NR_sys_getcpu __NR_getcpu
344 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
345 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
346           void *, arg);
347 _syscall2(int, capget, struct __user_cap_header_struct *, header,
348           struct __user_cap_data_struct *, data);
349 _syscall2(int, capset, struct __user_cap_header_struct *, header,
350           struct __user_cap_data_struct *, data);
351 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
352 _syscall2(int, ioprio_get, int, which, int, who)
353 #endif
354 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
355 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
356 #endif
357 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
358 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
359 #endif
360 
361 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
362 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
363           unsigned long, idx1, unsigned long, idx2)
364 #endif
365 
366 /*
367  * It is assumed that struct statx is architecture independent.
368  */
369 #if defined(TARGET_NR_statx) && defined(__NR_statx)
370 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
371           unsigned int, mask, struct target_statx *, statxbuf)
372 #endif
373 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
374 _syscall2(int, membarrier, int, cmd, int, flags)
375 #endif
376 
377 static const bitmask_transtbl fcntl_flags_tbl[] = {
378   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
379   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
380   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
381   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
382   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
383   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
384   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
385   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
386   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
387   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
388   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
389   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
390   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
391 #if defined(O_DIRECT)
392   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
393 #endif
394 #if defined(O_NOATIME)
395   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
396 #endif
397 #if defined(O_CLOEXEC)
398   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
399 #endif
400 #if defined(O_PATH)
401   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
402 #endif
403 #if defined(O_TMPFILE)
404   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
405 #endif
406   /* Don't terminate the list prematurely on 64-bit host+guest.  */
407 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
408   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
409 #endif
410   { 0, 0, 0, 0 }
411 };
412 
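/*
 * Editor's note (illustrative sketch, assuming the bitmask helpers defined
 * later in this file): each row of fcntl_flags_tbl is (target mask, target
 * bits, host mask, host bits), and open-style flags are translated by walking
 * the table, roughly:
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *     ret = get_errno(safe_openat(dirfd, path, host_flags, mode));
 *
 * host_to_target_bitmask() does the reverse mapping for values read back from
 * the host, such as F_GETFL results.
 */
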
413 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
414 
415 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
416 #if defined(__NR_utimensat)
417 #define __NR_sys_utimensat __NR_utimensat
418 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
419           const struct timespec *,tsp,int,flags)
420 #else
421 static int sys_utimensat(int dirfd, const char *pathname,
422                          const struct timespec times[2], int flags)
423 {
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_utimensat */
429 
430 #ifdef TARGET_NR_renameat2
431 #if defined(__NR_renameat2)
432 #define __NR_sys_renameat2 __NR_renameat2
433 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
434           const char *, new, unsigned int, flags)
435 #else
436 static int sys_renameat2(int oldfd, const char *old,
437                          int newfd, const char *new, int flags)
438 {
439     if (flags == 0) {
440         return renameat(oldfd, old, newfd, new);
441     }
442     errno = ENOSYS;
443     return -1;
444 }
445 #endif
446 #endif /* TARGET_NR_renameat2 */
447 
448 #ifdef CONFIG_INOTIFY
449 #include <sys/inotify.h>
450 
451 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
452 static int sys_inotify_init(void)
453 {
454   return (inotify_init());
455 }
456 #endif
457 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
458 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
459 {
460   return (inotify_add_watch(fd, pathname, mask));
461 }
462 #endif
463 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
464 static int sys_inotify_rm_watch(int fd, int32_t wd)
465 {
466   return (inotify_rm_watch(fd, wd));
467 }
468 #endif
469 #ifdef CONFIG_INOTIFY1
470 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
471 static int sys_inotify_init1(int flags)
472 {
473   return (inotify_init1(flags));
474 }
475 #endif
476 #endif
477 #else
478 /* Userspace can usually survive at runtime without inotify support */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not be the one used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, } ;
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k ;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
518 
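/*
 * Editor's note (illustrative, not part of the original source): the FIXME
 * above refers to a real window: two guest threads calling timer_create()
 * concurrently could both observe g_posix_timers[k] == 0 and claim slot k,
 * because the test and the store are separate operations.  A minimal sketch
 * of an atomic claim, assuming qemu/atomic.h's qatomic_cmpxchg(), would be
 *
 *     if (qatomic_cmpxchg(&g_posix_timers[k], (timer_t)0, (timer_t)1)
 *             == (timer_t)0) {
 *         return k;   // we won the race for slot k
 *     }
 *
 * The code as written relies on callers not racing here.
 */
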
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 #define safe_syscall0(type, name) \
562 static type safe_##name(void) \
563 { \
564     return safe_syscall(__NR_##name); \
565 }
566 
567 #define safe_syscall1(type, name, type1, arg1) \
568 static type safe_##name(type1 arg1) \
569 { \
570     return safe_syscall(__NR_##name, arg1); \
571 }
572 
573 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
574 static type safe_##name(type1 arg1, type2 arg2) \
575 { \
576     return safe_syscall(__NR_##name, arg1, arg2); \
577 }
578 
579 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
580 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
581 { \
582     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
583 }
584 
585 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
586     type4, arg4) \
587 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
588 { \
589     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
590 }
591 
592 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
593     type4, arg4, type5, arg5) \
594 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
595     type5 arg5) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
598 }
599 
600 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
601     type4, arg4, type5, arg5, type6, arg6) \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
603     type5 arg5, type6 arg6) \
604 { \
605     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
606 }
607 
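/*
 * Editor's note (illustrative): like the _syscallN macros earlier, these
 * expand into trivial wrappers, except that the body goes through
 * safe_syscall() from "user/safe-syscall.h".  The safe_read declaration just
 * below, for example, becomes roughly
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * Callers wrap the result in get_errno(); the intent (see the
 * DEBUG_ERESTARTSYS note near the top of the file) is that a guest signal
 * arriving while the host call blocks surfaces as -QEMU_ERESTARTSYS so the
 * main loop can restart the syscall instead of losing it.
 */
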
608 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
609 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
610 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
611               int, flags, mode_t, mode)
612 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
613 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
614               struct rusage *, rusage)
615 #endif
616 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
617               int, options, struct rusage *, rusage)
618 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
619 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
620     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
621 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
622               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
623 #endif
624 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
625 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
626               struct timespec *, tsp, const sigset_t *, sigmask,
627               size_t, sigsetsize)
628 #endif
629 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
630               int, maxevents, int, timeout, const sigset_t *, sigmask,
631               size_t, sigsetsize)
632 #if defined(__NR_futex)
633 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
634               const struct timespec *,timeout,int *,uaddr2,int,val3)
635 #endif
636 #if defined(__NR_futex_time64)
637 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
638               const struct timespec *,timeout,int *,uaddr2,int,val3)
639 #endif
640 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
641 safe_syscall2(int, kill, pid_t, pid, int, sig)
642 safe_syscall2(int, tkill, int, tid, int, sig)
643 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
644 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
645 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
646 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
647               unsigned long, pos_l, unsigned long, pos_h)
648 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
649               unsigned long, pos_l, unsigned long, pos_h)
650 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
651               socklen_t, addrlen)
652 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
653               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
654 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
655               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
656 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
657 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
658 safe_syscall2(int, flock, int, fd, int, operation)
659 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
660 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
661               const struct timespec *, uts, size_t, sigsetsize)
662 #endif
663 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
664               int, flags)
665 #if defined(TARGET_NR_nanosleep)
666 safe_syscall2(int, nanosleep, const struct timespec *, req,
667               struct timespec *, rem)
668 #endif
669 #if defined(TARGET_NR_clock_nanosleep) || \
670     defined(TARGET_NR_clock_nanosleep_time64)
671 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
672               const struct timespec *, req, struct timespec *, rem)
673 #endif
674 #ifdef __NR_ipc
675 #ifdef __s390x__
676 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr)
678 #else
679 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
680               void *, ptr, long, fifth)
681 #endif
682 #endif
683 #ifdef __NR_msgsnd
684 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
685               int, flags)
686 #endif
687 #ifdef __NR_msgrcv
688 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
689               long, msgtype, int, flags)
690 #endif
691 #ifdef __NR_semtimedop
692 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
693               unsigned, nsops, const struct timespec *, timeout)
694 #endif
695 #if defined(TARGET_NR_mq_timedsend) || \
696     defined(TARGET_NR_mq_timedsend_time64)
697 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
698               size_t, len, unsigned, prio, const struct timespec *, timeout)
699 #endif
700 #if defined(TARGET_NR_mq_timedreceive) || \
701     defined(TARGET_NR_mq_timedreceive_time64)
702 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
703               size_t, len, unsigned *, prio, const struct timespec *, timeout)
704 #endif
705 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
706 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
707               int, outfd, loff_t *, poutoff, size_t, length,
708               unsigned int, flags)
709 #endif
710 
711 /* We do ioctl like this rather than via safe_syscall3 to preserve the
712  * "third argument might be integer or pointer or not present" behaviour of
713  * the libc function.
714  */
715 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
716 /* Similarly for fcntl. Note that callers must always:
717  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
718  *  - use the flock64 struct rather than unsuffixed flock
719  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
720  */
721 #ifdef __NR_fcntl64
722 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
723 #else
724 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
725 #endif
726 
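/*
 * Editor's note (illustrative sketch of the rule stated above): a caller
 * querying a lock would always use the 64-bit variants, e.g.
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * On a 32-bit host this resolves to __NR_fcntl64 and on a 64-bit host to
 * plain __NR_fcntl, but either way the offsets in fl64 are 64-bit.
 */
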
727 static inline int host_to_target_sock_type(int host_type)
728 {
729     int target_type;
730 
731     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
732     case SOCK_DGRAM:
733         target_type = TARGET_SOCK_DGRAM;
734         break;
735     case SOCK_STREAM:
736         target_type = TARGET_SOCK_STREAM;
737         break;
738     default:
739         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
740         break;
741     }
742 
743 #if defined(SOCK_CLOEXEC)
744     if (host_type & SOCK_CLOEXEC) {
745         target_type |= TARGET_SOCK_CLOEXEC;
746     }
747 #endif
748 
749 #if defined(SOCK_NONBLOCK)
750     if (host_type & SOCK_NONBLOCK) {
751         target_type |= TARGET_SOCK_NONBLOCK;
752     }
753 #endif
754 
755     return target_type;
756 }
757 
758 static abi_ulong target_brk;
759 static abi_ulong target_original_brk;
760 static abi_ulong brk_page;
761 
762 void target_set_brk(abi_ulong new_brk)
763 {
764     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
765     brk_page = HOST_PAGE_ALIGN(target_brk);
766 }
767 
768 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
769 #define DEBUGF_BRK(message, args...)
770 
771 /* do_brk() must return target values and target errnos. */
772 abi_long do_brk(abi_ulong new_brk)
773 {
774     abi_long mapped_addr;
775     abi_ulong new_alloc_size;
776 
777     /* brk pointers are always untagged */
778 
779     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
780 
781     if (!new_brk) {
782         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
783         return target_brk;
784     }
785     if (new_brk < target_original_brk) {
786         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
787                    target_brk);
788         return target_brk;
789     }
790 
791     /* If the new brk is less than the highest page reserved to the
792      * target heap allocation, set it and we're almost done...  */
793     if (new_brk <= brk_page) {
794         /* Heap contents are initialized to zero, as for anonymous
795          * mapped pages.  */
796         if (new_brk > target_brk) {
797             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
798         }
799         target_brk = new_brk;
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
801         return target_brk;
802     }
803 
804     /* We need to allocate more memory after the brk... Note that
805      * we don't use MAP_FIXED because that will map over the top of
806      * any existing mapping (like the one with the host libc or qemu
807      * itself); instead we treat "mapped but at wrong address" as
808      * a failure and unmap again.
809      */
810     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
811     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
812                                         PROT_READ|PROT_WRITE,
813                                         MAP_ANON|MAP_PRIVATE, 0, 0));
814 
815     if (mapped_addr == brk_page) {
816         /* Heap contents are initialized to zero, as for anonymous
817          * mapped pages.  Technically the new pages are already
818          * initialized to zero since they *are* anonymous mapped
819          * pages, however we have to take care with the contents that
820          * come from the remaining part of the previous page: it may
821      * contain garbage data due to a previous heap usage (grown
822      * then shrunk).  */
823         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
824 
825         target_brk = new_brk;
826         brk_page = HOST_PAGE_ALIGN(target_brk);
827         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
828             target_brk);
829         return target_brk;
830     } else if (mapped_addr != -1) {
831         /* Mapped but at wrong address, meaning there wasn't actually
832          * enough space for this brk.
833          */
834         target_munmap(mapped_addr, new_alloc_size);
835         mapped_addr = -1;
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
837     }
838     else {
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
840     }
841 
842 #if defined(TARGET_ALPHA)
843     /* We (partially) emulate OSF/1 on Alpha, which requires we
844        return a proper errno, not an unchanged brk value.  */
845     return -TARGET_ENOMEM;
846 #endif
847     /* For everything else, return the previous break. */
848     return target_brk;
849 }
850 
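/*
 * Editor's note (worked example with made-up addresses, assuming 4 KiB host
 * pages): suppose target_brk == brk_page == 0x0804c000 and the guest calls
 * brk(0x0804e800).  The request lies above brk_page, so
 *
 *     new_alloc_size = HOST_PAGE_ALIGN(0x0804e800 - 0x0804c000) = 0x3000
 *
 * and target_mmap() is asked for 0x3000 bytes at 0x0804c000.  If the mapping
 * lands exactly there, target_brk becomes 0x0804e800 and brk_page becomes
 * 0x0804f000, and the new break is returned; if it lands anywhere else the
 * mapping is undone and the old break (or -TARGET_ENOMEM on Alpha) is
 * returned instead.
 */
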
851 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
852     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
853 static inline abi_long copy_from_user_fdset(fd_set *fds,
854                                             abi_ulong target_fds_addr,
855                                             int n)
856 {
857     int i, nw, j, k;
858     abi_ulong b, *target_fds;
859 
860     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
861     if (!(target_fds = lock_user(VERIFY_READ,
862                                  target_fds_addr,
863                                  sizeof(abi_ulong) * nw,
864                                  1)))
865         return -TARGET_EFAULT;
866 
867     FD_ZERO(fds);
868     k = 0;
869     for (i = 0; i < nw; i++) {
870         /* grab the abi_ulong */
871         __get_user(b, &target_fds[i]);
872         for (j = 0; j < TARGET_ABI_BITS; j++) {
873             /* check the bit inside the abi_ulong */
874             if ((b >> j) & 1)
875                 FD_SET(k, fds);
876             k++;
877         }
878     }
879 
880     unlock_user(target_fds, target_fds_addr, 0);
881 
882     return 0;
883 }
884 
885 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
886                                                  abi_ulong target_fds_addr,
887                                                  int n)
888 {
889     if (target_fds_addr) {
890         if (copy_from_user_fdset(fds, target_fds_addr, n))
891             return -TARGET_EFAULT;
892         *fds_ptr = fds;
893     } else {
894         *fds_ptr = NULL;
895     }
896     return 0;
897 }
898 
899 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
900                                           const fd_set *fds,
901                                           int n)
902 {
903     int i, nw, j, k;
904     abi_long v;
905     abi_ulong *target_fds;
906 
907     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
908     if (!(target_fds = lock_user(VERIFY_WRITE,
909                                  target_fds_addr,
910                                  sizeof(abi_ulong) * nw,
911                                  0)))
912         return -TARGET_EFAULT;
913 
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         v = 0;
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
919             k++;
920         }
921         __put_user(v, &target_fds[i]);
922     }
923 
924     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
925 
926     return 0;
927 }
928 #endif
929 
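/*
 * Editor's note (illustrative): the fdset helpers above re-pack the guest
 * fd_set, which is an array of abi_ulong words in guest byte order.  For a
 * 32-bit guest and n == 40, nw is DIV_ROUND_UP(40, 32) == 2 words, and guest
 * fd 35 is bit j == 3 of word i == 1 (32 * 1 + 3 == 35); __get_user()
 * converts each word from guest byte order before the corresponding host
 * FD_SET() bit is set.
 */
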
930 #if defined(__alpha__)
931 #define HOST_HZ 1024
932 #else
933 #define HOST_HZ 100
934 #endif
935 
936 static inline abi_long host_to_target_clock_t(long ticks)
937 {
938 #if HOST_HZ == TARGET_HZ
939     return ticks;
940 #else
941     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
942 #endif
943 }
944 
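/*
 * Editor's note (illustrative): on an Alpha host (HOST_HZ == 1024) emulating
 * a target with TARGET_HZ == 100, a host value of 2048 ticks converts to
 * (2048 * 100) / 1024 == 200 target ticks; when both sides use the same HZ
 * the value passes through unchanged.
 */
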
945 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
946                                              const struct rusage *rusage)
947 {
948     struct target_rusage *target_rusage;
949 
950     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
951         return -TARGET_EFAULT;
952     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
953     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
954     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
955     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
956     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
957     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
958     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
959     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
960     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
961     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
962     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
963     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
964     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
965     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
966     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
967     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
968     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
969     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
970     unlock_user_struct(target_rusage, target_addr, 1);
971 
972     return 0;
973 }
974 
975 #ifdef TARGET_NR_setrlimit
976 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
977 {
978     abi_ulong target_rlim_swap;
979     rlim_t result;
980 
981     target_rlim_swap = tswapal(target_rlim);
982     if (target_rlim_swap == TARGET_RLIM_INFINITY)
983         return RLIM_INFINITY;
984 
985     result = target_rlim_swap;
986     if (target_rlim_swap != (rlim_t)result)
987         return RLIM_INFINITY;
988 
989     return result;
990 }
991 #endif
992 
993 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
994 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
995 {
996     abi_ulong target_rlim_swap;
997     abi_ulong result;
998 
999     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1000         target_rlim_swap = TARGET_RLIM_INFINITY;
1001     else
1002         target_rlim_swap = rlim;
1003     result = tswapal(target_rlim_swap);
1004 
1005     return result;
1006 }
1007 #endif
1008 
1009 static inline int target_to_host_resource(int code)
1010 {
1011     switch (code) {
1012     case TARGET_RLIMIT_AS:
1013         return RLIMIT_AS;
1014     case TARGET_RLIMIT_CORE:
1015         return RLIMIT_CORE;
1016     case TARGET_RLIMIT_CPU:
1017         return RLIMIT_CPU;
1018     case TARGET_RLIMIT_DATA:
1019         return RLIMIT_DATA;
1020     case TARGET_RLIMIT_FSIZE:
1021         return RLIMIT_FSIZE;
1022     case TARGET_RLIMIT_LOCKS:
1023         return RLIMIT_LOCKS;
1024     case TARGET_RLIMIT_MEMLOCK:
1025         return RLIMIT_MEMLOCK;
1026     case TARGET_RLIMIT_MSGQUEUE:
1027         return RLIMIT_MSGQUEUE;
1028     case TARGET_RLIMIT_NICE:
1029         return RLIMIT_NICE;
1030     case TARGET_RLIMIT_NOFILE:
1031         return RLIMIT_NOFILE;
1032     case TARGET_RLIMIT_NPROC:
1033         return RLIMIT_NPROC;
1034     case TARGET_RLIMIT_RSS:
1035         return RLIMIT_RSS;
1036     case TARGET_RLIMIT_RTPRIO:
1037         return RLIMIT_RTPRIO;
1038     case TARGET_RLIMIT_SIGPENDING:
1039         return RLIMIT_SIGPENDING;
1040     case TARGET_RLIMIT_STACK:
1041         return RLIMIT_STACK;
1042     default:
1043         return code;
1044     }
1045 }
1046 
1047 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1048                                               abi_ulong target_tv_addr)
1049 {
1050     struct target_timeval *target_tv;
1051 
1052     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1053         return -TARGET_EFAULT;
1054     }
1055 
1056     __get_user(tv->tv_sec, &target_tv->tv_sec);
1057     __get_user(tv->tv_usec, &target_tv->tv_usec);
1058 
1059     unlock_user_struct(target_tv, target_tv_addr, 0);
1060 
1061     return 0;
1062 }
1063 
1064 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1065                                             const struct timeval *tv)
1066 {
1067     struct target_timeval *target_tv;
1068 
1069     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1070         return -TARGET_EFAULT;
1071     }
1072 
1073     __put_user(tv->tv_sec, &target_tv->tv_sec);
1074     __put_user(tv->tv_usec, &target_tv->tv_usec);
1075 
1076     unlock_user_struct(target_tv, target_tv_addr, 1);
1077 
1078     return 0;
1079 }
1080 
1081 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1082 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1083                                                 abi_ulong target_tv_addr)
1084 {
1085     struct target__kernel_sock_timeval *target_tv;
1086 
1087     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1088         return -TARGET_EFAULT;
1089     }
1090 
1091     __get_user(tv->tv_sec, &target_tv->tv_sec);
1092     __get_user(tv->tv_usec, &target_tv->tv_usec);
1093 
1094     unlock_user_struct(target_tv, target_tv_addr, 0);
1095 
1096     return 0;
1097 }
1098 #endif
1099 
1100 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1101                                               const struct timeval *tv)
1102 {
1103     struct target__kernel_sock_timeval *target_tv;
1104 
1105     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1106         return -TARGET_EFAULT;
1107     }
1108 
1109     __put_user(tv->tv_sec, &target_tv->tv_sec);
1110     __put_user(tv->tv_usec, &target_tv->tv_usec);
1111 
1112     unlock_user_struct(target_tv, target_tv_addr, 1);
1113 
1114     return 0;
1115 }
1116 
1117 #if defined(TARGET_NR_futex) || \
1118     defined(TARGET_NR_rt_sigtimedwait) || \
1119     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1120     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1121     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1122     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1123     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1124     defined(TARGET_NR_timer_settime) || \
1125     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1126 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1127                                                abi_ulong target_addr)
1128 {
1129     struct target_timespec *target_ts;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1135     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1136     unlock_user_struct(target_ts, target_addr, 0);
1137     return 0;
1138 }
1139 #endif
1140 
1141 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1142     defined(TARGET_NR_timer_settime64) || \
1143     defined(TARGET_NR_mq_timedsend_time64) || \
1144     defined(TARGET_NR_mq_timedreceive_time64) || \
1145     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1146     defined(TARGET_NR_clock_nanosleep_time64) || \
1147     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1148     defined(TARGET_NR_utimensat) || \
1149     defined(TARGET_NR_utimensat_time64) || \
1150     defined(TARGET_NR_semtimedop_time64) || \
1151     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1152 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1153                                                  abi_ulong target_addr)
1154 {
1155     struct target__kernel_timespec *target_ts;
1156 
1157     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1158         return -TARGET_EFAULT;
1159     }
1160     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1161     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1162     /* In 32-bit guest mode this drops the padding half of tv_nsec. */
1163     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1164     unlock_user_struct(target_ts, target_addr, 0);
1165     return 0;
1166 }
1167 #endif
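
/*
 * Editor's note (illustrative): the (long)(abi_long) cast above matters for
 * 32-bit guests, whose target__kernel_timespec stores tv_nsec in a 64-bit
 * slot of which only the low 32 bits are meaningful.  If the raw field reads
 * 0xdeadbeef000f4240, truncating to the 32-bit abi_long keeps 0x000f4240
 * (1000000 ns) and the sign extension back to long discards the 0xdeadbeef
 * padding.
 */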
1168 
1169 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1170                                                struct timespec *host_ts)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 1);
1180     return 0;
1181 }
1182 
1183 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1184                                                  struct timespec *host_ts)
1185 {
1186     struct target__kernel_timespec *target_ts;
1187 
1188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189         return -TARGET_EFAULT;
1190     }
1191     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193     unlock_user_struct(target_ts, target_addr, 1);
1194     return 0;
1195 }
1196 
1197 #if defined(TARGET_NR_gettimeofday)
1198 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1199                                              struct timezone *tz)
1200 {
1201     struct target_timezone *target_tz;
1202 
1203     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1204         return -TARGET_EFAULT;
1205     }
1206 
1207     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1208     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1209 
1210     unlock_user_struct(target_tz, target_tz_addr, 1);
1211 
1212     return 0;
1213 }
1214 #endif
1215 
1216 #if defined(TARGET_NR_settimeofday)
1217 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1218                                                abi_ulong target_tz_addr)
1219 {
1220     struct target_timezone *target_tz;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225 
1226     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1227     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1228 
1229     unlock_user_struct(target_tz, target_tz_addr, 0);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1236 #include <mqueue.h>
1237 
1238 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1239                                               abi_ulong target_mq_attr_addr)
1240 {
1241     struct target_mq_attr *target_mq_attr;
1242 
1243     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1244                           target_mq_attr_addr, 1))
1245         return -TARGET_EFAULT;
1246 
1247     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1248     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1249     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1250     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1251 
1252     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1253 
1254     return 0;
1255 }
1256 
1257 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1258                                             const struct mq_attr *attr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1263                           target_mq_attr_addr, 0))
1264         return -TARGET_EFAULT;
1265 
1266     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1272 
1273     return 0;
1274 }
1275 #endif
1276 
1277 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1278 /* do_select() must return target values and target errnos. */
1279 static abi_long do_select(int n,
1280                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1281                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1282 {
1283     fd_set rfds, wfds, efds;
1284     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1285     struct timeval tv;
1286     struct timespec ts, *ts_ptr;
1287     abi_long ret;
1288 
1289     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301 
1302     if (target_tv_addr) {
1303         if (copy_from_user_timeval(&tv, target_tv_addr))
1304             return -TARGET_EFAULT;
1305         ts.tv_sec = tv.tv_sec;
1306         ts.tv_nsec = tv.tv_usec * 1000;
1307         ts_ptr = &ts;
1308     } else {
1309         ts_ptr = NULL;
1310     }
1311 
1312     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1313                                   ts_ptr, NULL));
1314 
1315     if (!is_error(ret)) {
1316         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1317             return -TARGET_EFAULT;
1318         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1319             return -TARGET_EFAULT;
1320         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1321             return -TARGET_EFAULT;
1322 
1323         if (target_tv_addr) {
1324             tv.tv_sec = ts.tv_sec;
1325             tv.tv_usec = ts.tv_nsec / 1000;
1326             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1327                 return -TARGET_EFAULT;
1328             }
1329         }
1330     }
1331 
1332     return ret;
1333 }
1334 
1335 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1336 static abi_long do_old_select(abi_ulong arg1)
1337 {
1338     struct target_sel_arg_struct *sel;
1339     abi_ulong inp, outp, exp, tvp;
1340     long nsel;
1341 
1342     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     nsel = tswapal(sel->n);
1347     inp = tswapal(sel->inp);
1348     outp = tswapal(sel->outp);
1349     exp = tswapal(sel->exp);
1350     tvp = tswapal(sel->tvp);
1351 
1352     unlock_user_struct(sel, arg1, 0);
1353 
1354     return do_select(nsel, inp, outp, exp, tvp);
1355 }
1356 #endif
1357 #endif
1358 
1359 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1360 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1361                             abi_long arg4, abi_long arg5, abi_long arg6,
1362                             bool time64)
1363 {
1364     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1365     fd_set rfds, wfds, efds;
1366     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1367     struct timespec ts, *ts_ptr;
1368     abi_long ret;
1369 
1370     /*
1371      * The 6th arg is actually two args smashed together,
1372      * so we cannot use the C library.
1373      */
1374     sigset_t set;
1375     struct {
1376         sigset_t *set;
1377         size_t size;
1378     } sig, *sig_ptr;
1379 
1380     abi_ulong arg_sigset, arg_sigsize, *arg7;
1381     target_sigset_t *target_sigset;
1382 
1383     n = arg1;
1384     rfd_addr = arg2;
1385     wfd_addr = arg3;
1386     efd_addr = arg4;
1387     ts_addr = arg5;
1388 
1389     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1398     if (ret) {
1399         return ret;
1400     }
1401 
1402     /*
1403      * This takes a timespec, and not a timeval, so we cannot
1404      * use the do_select() helper ...
1405      */
1406     if (ts_addr) {
1407         if (time64) {
1408             if (target_to_host_timespec64(&ts, ts_addr)) {
1409                 return -TARGET_EFAULT;
1410             }
1411         } else {
1412             if (target_to_host_timespec(&ts, ts_addr)) {
1413                 return -TARGET_EFAULT;
1414             }
1415         }
1416         ts_ptr = &ts;
1417     } else {
1418         ts_ptr = NULL;
1419     }
1420 
1421     /* Extract the two packed args for the sigset */
1422     if (arg6) {
1423         sig_ptr = &sig;
1424         sig.size = SIGSET_T_SIZE;
1425 
1426         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1427         if (!arg7) {
1428             return -TARGET_EFAULT;
1429         }
1430         arg_sigset = tswapal(arg7[0]);
1431         arg_sigsize = tswapal(arg7[1]);
1432         unlock_user(arg7, arg6, 0);
1433 
1434         if (arg_sigset) {
1435             sig.set = &set;
1436             if (arg_sigsize != sizeof(*target_sigset)) {
1437                 /* Like the kernel, we enforce correct size sigsets */
1438                 return -TARGET_EINVAL;
1439             }
1440             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1441                                       sizeof(*target_sigset), 1);
1442             if (!target_sigset) {
1443                 return -TARGET_EFAULT;
1444             }
1445             target_to_host_sigset(&set, target_sigset);
1446             unlock_user(target_sigset, arg_sigset, 0);
1447         } else {
1448             sig.set = NULL;
1449         }
1450     } else {
1451         sig_ptr = NULL;
1452     }
1453 
1454     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                   ts_ptr, sig_ptr));
1456 
1457     if (!is_error(ret)) {
1458         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1465             return -TARGET_EFAULT;
1466         }
1467         if (time64) {
1468             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1469                 return -TARGET_EFAULT;
1470             }
1471         } else {
1472             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1473                 return -TARGET_EFAULT;
1474             }
1475         }
1476     }
1477     return ret;
1478 }
1479 #endif
1480 
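/*
 * Editor's note (illustrative): the "two args smashed together" handled in
 * do_pselect6() means the guest's sixth argument points at a pair of
 * abi_ulongs rather than at the sigset itself.  For a 32-bit guest the
 * referenced memory is
 *
 *     offset 0: guest address of the target_sigset_t (0 means "no sigset")
 *     offset 4: sigsetsize, which must equal sizeof(target_sigset_t)
 *               whenever a sigset address is supplied
 *
 * and both words are converted with tswapal() before use, mirroring the
 * kernel's own packed-sigset convention for pselect6().
 */
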
1481 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1482     defined(TARGET_NR_ppoll_time64)
1483 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1484                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1485 {
1486     struct target_pollfd *target_pfd;
1487     unsigned int nfds = arg2;
1488     struct pollfd *pfd;
1489     unsigned int i;
1490     abi_long ret;
1491 
1492     pfd = NULL;
1493     target_pfd = NULL;
1494     if (nfds) {
1495         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1496             return -TARGET_EINVAL;
1497         }
1498         target_pfd = lock_user(VERIFY_WRITE, arg1,
1499                                sizeof(struct target_pollfd) * nfds, 1);
1500         if (!target_pfd) {
1501             return -TARGET_EFAULT;
1502         }
1503 
1504         pfd = alloca(sizeof(struct pollfd) * nfds);
1505         for (i = 0; i < nfds; i++) {
1506             pfd[i].fd = tswap32(target_pfd[i].fd);
1507             pfd[i].events = tswap16(target_pfd[i].events);
1508         }
1509     }
1510     if (ppoll) {
1511         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1512         target_sigset_t *target_set;
1513         sigset_t _set, *set = &_set;
1514 
1515         if (arg3) {
1516             if (time64) {
1517                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1518                     unlock_user(target_pfd, arg1, 0);
1519                     return -TARGET_EFAULT;
1520                 }
1521             } else {
1522                 if (target_to_host_timespec(timeout_ts, arg3)) {
1523                     unlock_user(target_pfd, arg1, 0);
1524                     return -TARGET_EFAULT;
1525                 }
1526             }
1527         } else {
1528             timeout_ts = NULL;
1529         }
1530 
1531         if (arg4) {
1532             if (arg5 != sizeof(target_sigset_t)) {
1533                 unlock_user(target_pfd, arg1, 0);
1534                 return -TARGET_EINVAL;
1535             }
1536 
1537             target_set = lock_user(VERIFY_READ, arg4,
1538                                    sizeof(target_sigset_t), 1);
1539             if (!target_set) {
1540                 unlock_user(target_pfd, arg1, 0);
1541                 return -TARGET_EFAULT;
1542             }
1543             target_to_host_sigset(set, target_set);
1544         } else {
1545             set = NULL;
1546         }
1547 
1548         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1549                                    set, SIGSET_T_SIZE));
1550 
1551         if (!is_error(ret) && arg3) {
1552             if (time64) {
1553                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             } else {
1557                 if (host_to_target_timespec(arg3, timeout_ts)) {
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         }
1562         if (arg4) {
1563             unlock_user(target_set, arg4, 0);
1564         }
1565     } else {
1566         struct timespec ts, *pts;
1567 
1568         if (arg3 >= 0) {
1569             /* Convert ms to secs, ns */
1570             ts.tv_sec = arg3 / 1000;
1571             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1572             pts = &ts;
1573         } else {
1574             /* A negative poll() timeout means "infinite" */
1575             pts = NULL;
1576         }
1577         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1578     }
1579 
1580     if (!is_error(ret)) {
1581         for (i = 0; i < nfds; i++) {
1582             target_pfd[i].revents = tswap16(pfd[i].revents);
1583         }
1584     }
1585     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1586     return ret;
1587 }
1588 #endif
1589 
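/* pipe2() helper: returns -ENOSYS when QEMU was built without CONFIG_PIPE2. */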
1590 static abi_long do_pipe2(int host_pipe[], int flags)
1591 {
1592 #ifdef CONFIG_PIPE2
1593     return pipe2(host_pipe, flags);
1594 #else
1595     return -ENOSYS;
1596 #endif
1597 }
1598 
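/*
 * Emulate pipe()/pipe2(): create the host pipe and store both descriptors
 * in guest memory at 'pipedes'.  For the original pipe syscall, some
 * targets instead return the read end as the syscall result and the write
 * end in a CPU register; those cases are handled below.
 */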
1599 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1600                         int flags, int is_pipe2)
1601 {
1602     int host_pipe[2];
1603     abi_long ret;
1604     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1605 
1606     if (is_error(ret))
1607         return get_errno(ret);
1608 
1609     /* Several targets have special calling conventions for the original
1610        pipe syscall, but did not replicate this in the pipe2 syscall.  */
1611     if (!is_pipe2) {
1612 #if defined(TARGET_ALPHA)
1613         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_MIPS)
1616         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SH4)
1619         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #elif defined(TARGET_SPARC)
1622         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1623         return host_pipe[0];
1624 #endif
1625     }
1626 
1627     if (put_user_s32(host_pipe[0], pipedes)
1628         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1629         return -TARGET_EFAULT;
1630     return get_errno(ret);
1631 }
1632 
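/*
 * Convert a guest ip_mreq/ip_mreqn at 'target_addr' into the host
 * ip_mreqn.  The imr_ifindex field is only present (and byte-swapped)
 * when the guest passed the larger ip_mreqn layout, as indicated by 'len'.
 */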
1633 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1634                                               abi_ulong target_addr,
1635                                               socklen_t len)
1636 {
1637     struct target_ip_mreqn *target_smreqn;
1638 
1639     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1640     if (!target_smreqn)
1641         return -TARGET_EFAULT;
1642     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1643     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1644     if (len == sizeof(struct target_ip_mreqn))
1645         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1646     unlock_user(target_smreqn, target_addr, 0);
1647 
1648     return 0;
1649 }
1650 
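/*
 * Convert a guest sockaddr at 'target_addr' into the host 'addr':
 * byte-swap sa_family and the family-specific fields that need it
 * (AF_NETLINK pid/groups, AF_PACKET ifindex/hatype), and fix up
 * AF_UNIX sun_path lengths that omit the trailing NUL.
 */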
1651 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1652                                                abi_ulong target_addr,
1653                                                socklen_t len)
1654 {
1655     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1656     sa_family_t sa_family;
1657     struct target_sockaddr *target_saddr;
1658 
1659     if (fd_trans_target_to_host_addr(fd)) {
1660         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1661     }
1662 
1663     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1664     if (!target_saddr)
1665         return -TARGET_EFAULT;
1666 
1667     sa_family = tswap16(target_saddr->sa_family);
1668 
1669     /* Oops. The caller might send an incomplete sun_path; sun_path
1670      * must be terminated by \0 (see the manual page), but
1671      * unfortunately it is quite common to specify the sockaddr_un
1672      * length as "strlen(x->sun_path)" when it should be
1673      * "strlen(...) + 1". We'll fix that here if needed.
1674      * The Linux kernel has a similar feature.
1675      */
1676 
1677     if (sa_family == AF_UNIX) {
1678         if (len < unix_maxlen && len > 0) {
1679             char *cp = (char *)target_saddr;
1680 
1681             if (cp[len - 1] && !cp[len])
1682                 len++;
1683         }
1684         if (len > unix_maxlen)
1685             len = unix_maxlen;
1686     }
1687 
1688     memcpy(addr, target_saddr, len);
1689     addr->sa_family = sa_family;
1690     if (sa_family == AF_NETLINK) {
1691         struct sockaddr_nl *nladdr;
1692 
1693         nladdr = (struct sockaddr_nl *)addr;
1694         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1695         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1696     } else if (sa_family == AF_PACKET) {
1697         struct target_sockaddr_ll *lladdr;
1698 
1699         lladdr = (struct target_sockaddr_ll *)addr;
1700         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1701         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1702     }
1703     unlock_user(target_saddr, target_addr, 0);
1704 
1705     return 0;
1706 }
1707 
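/*
 * Copy a host sockaddr back to guest memory at 'target_addr', byte-swapping
 * sa_family and the family-specific fields that differ in endianness
 * (netlink pid/groups, packet ifindex/hatype, IPv6 scope id).
 */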
1708 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1709                                                struct sockaddr *addr,
1710                                                socklen_t len)
1711 {
1712     struct target_sockaddr *target_saddr;
1713 
1714     if (len == 0) {
1715         return 0;
1716     }
1717     assert(addr);
1718 
1719     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722     memcpy(target_saddr, addr, len);
1723     if (len >= offsetof(struct target_sockaddr, sa_family) +
1724         sizeof(target_saddr->sa_family)) {
1725         target_saddr->sa_family = tswap16(addr->sa_family);
1726     }
1727     if (addr->sa_family == AF_NETLINK &&
1728         len >= sizeof(struct target_sockaddr_nl)) {
1729         struct target_sockaddr_nl *target_nl =
1730                (struct target_sockaddr_nl *)target_saddr;
1731         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1732         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1733     } else if (addr->sa_family == AF_PACKET) {
1734         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1735         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1736         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1737     } else if (addr->sa_family == AF_INET6 &&
1738                len >= sizeof(struct target_sockaddr_in6)) {
1739         struct target_sockaddr_in6 *target_in6 =
1740                (struct target_sockaddr_in6 *)target_saddr;
1741         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1742     }
1743     unlock_user(target_saddr, target_addr, len);
1744 
1745     return 0;
1746 }
1747 
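/*
 * Convert guest ancillary data (control messages) in 'target_msgh' into
 * the host 'msgh'.  SCM_RIGHTS and SCM_CREDENTIALS payloads are converted
 * field by field; any other type is copied verbatim and logged as
 * unsupported.
 */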
1748 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1749                                            struct target_msghdr *target_msgh)
1750 {
1751     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1752     abi_long msg_controllen;
1753     abi_ulong target_cmsg_addr;
1754     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1755     socklen_t space = 0;
1756 
1757     msg_controllen = tswapal(target_msgh->msg_controllen);
1758     if (msg_controllen < sizeof (struct target_cmsghdr))
1759         goto the_end;
1760     target_cmsg_addr = tswapal(target_msgh->msg_control);
1761     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1762     target_cmsg_start = target_cmsg;
1763     if (!target_cmsg)
1764         return -TARGET_EFAULT;
1765 
1766     while (cmsg && target_cmsg) {
1767         void *data = CMSG_DATA(cmsg);
1768         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1769 
1770         int len = tswapal(target_cmsg->cmsg_len)
1771             - sizeof(struct target_cmsghdr);
1772 
1773         space += CMSG_SPACE(len);
1774         if (space > msgh->msg_controllen) {
1775             space -= CMSG_SPACE(len);
1776             /* This is a QEMU bug, since we allocated the payload
1777              * area ourselves (unlike overflow in host-to-target
1778              * conversion, which is just the guest giving us a buffer
1779              * that's too small). It can't happen for the payload types
1780              * we currently support; if it becomes an issue in future
1781              * we would need to improve our allocation strategy to
1782              * something more intelligent than "twice the size of the
1783              * target buffer we're reading from".
1784              */
1785             qemu_log_mask(LOG_UNIMP,
1786                           ("Unsupported ancillary data %d/%d: "
1787                            "unhandled msg size\n"),
1788                           tswap32(target_cmsg->cmsg_level),
1789                           tswap32(target_cmsg->cmsg_type));
1790             break;
1791         }
1792 
1793         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1794             cmsg->cmsg_level = SOL_SOCKET;
1795         } else {
1796             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1797         }
1798         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1799         cmsg->cmsg_len = CMSG_LEN(len);
1800 
1801         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1802             int *fd = (int *)data;
1803             int *target_fd = (int *)target_data;
1804             int i, numfds = len / sizeof(int);
1805 
1806             for (i = 0; i < numfds; i++) {
1807                 __get_user(fd[i], target_fd + i);
1808             }
1809         } else if (cmsg->cmsg_level == SOL_SOCKET
1810                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1811             struct ucred *cred = (struct ucred *)data;
1812             struct target_ucred *target_cred =
1813                 (struct target_ucred *)target_data;
1814 
1815             __get_user(cred->pid, &target_cred->pid);
1816             __get_user(cred->uid, &target_cred->uid);
1817             __get_user(cred->gid, &target_cred->gid);
1818         } else {
1819             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1820                           cmsg->cmsg_level, cmsg->cmsg_type);
1821             memcpy(data, target_data, len);
1822         }
1823 
1824         cmsg = CMSG_NXTHDR(msgh, cmsg);
1825         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1826                                          target_cmsg_start);
1827     }
1828     unlock_user(target_cmsg, target_cmsg_addr, 0);
1829  the_end:
1830     msgh->msg_controllen = space;
1831     return 0;
1832 }
1833 
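/*
 * Convert host ancillary data in 'msgh' back into the guest buffer
 * described by 'target_msgh'.  Payloads whose target layout differs in
 * size (e.g. SO_TIMESTAMP's struct timeval) are resized, and truncation
 * is reported to the guest via MSG_CTRUNC.
 */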
1834 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1835                                            struct msghdr *msgh)
1836 {
1837     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1838     abi_long msg_controllen;
1839     abi_ulong target_cmsg_addr;
1840     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1841     socklen_t space = 0;
1842 
1843     msg_controllen = tswapal(target_msgh->msg_controllen);
1844     if (msg_controllen < sizeof (struct target_cmsghdr))
1845         goto the_end;
1846     target_cmsg_addr = tswapal(target_msgh->msg_control);
1847     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1848     target_cmsg_start = target_cmsg;
1849     if (!target_cmsg)
1850         return -TARGET_EFAULT;
1851 
1852     while (cmsg && target_cmsg) {
1853         void *data = CMSG_DATA(cmsg);
1854         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1855 
1856         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1857         int tgt_len, tgt_space;
1858 
1859         /* We never copy a half-header but may copy half-data;
1860          * this is Linux's behaviour in put_cmsg(). Note that
1861          * truncation here is a guest problem (which we report
1862          * to the guest via the CTRUNC bit), unlike truncation
1863          * in target_to_host_cmsg, which is a QEMU bug.
1864          */
1865         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1866             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1867             break;
1868         }
1869 
1870         if (cmsg->cmsg_level == SOL_SOCKET) {
1871             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1872         } else {
1873             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1874         }
1875         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1876 
1877         /* Payload types which need a different size of payload on
1878          * the target must adjust tgt_len here.
1879          */
1880         tgt_len = len;
1881         switch (cmsg->cmsg_level) {
1882         case SOL_SOCKET:
1883             switch (cmsg->cmsg_type) {
1884             case SO_TIMESTAMP:
1885                 tgt_len = sizeof(struct target_timeval);
1886                 break;
1887             default:
1888                 break;
1889             }
1890             break;
1891         default:
1892             break;
1893         }
1894 
1895         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1896             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1897             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1898         }
1899 
1900         /* We must now copy-and-convert len bytes of payload
1901          * into tgt_len bytes of destination space. Bear in mind
1902          * that in both source and destination we may be dealing
1903          * with a truncated value!
1904          */
1905         switch (cmsg->cmsg_level) {
1906         case SOL_SOCKET:
1907             switch (cmsg->cmsg_type) {
1908             case SCM_RIGHTS:
1909             {
1910                 int *fd = (int *)data;
1911                 int *target_fd = (int *)target_data;
1912                 int i, numfds = tgt_len / sizeof(int);
1913 
1914                 for (i = 0; i < numfds; i++) {
1915                     __put_user(fd[i], target_fd + i);
1916                 }
1917                 break;
1918             }
1919             case SO_TIMESTAMP:
1920             {
1921                 struct timeval *tv = (struct timeval *)data;
1922                 struct target_timeval *target_tv =
1923                     (struct target_timeval *)target_data;
1924 
1925                 if (len != sizeof(struct timeval) ||
1926                     tgt_len != sizeof(struct target_timeval)) {
1927                     goto unimplemented;
1928                 }
1929 
1930                 /* copy struct timeval to target */
1931                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1932                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1933                 break;
1934             }
1935             case SCM_CREDENTIALS:
1936             {
1937                 struct ucred *cred = (struct ucred *)data;
1938                 struct target_ucred *target_cred =
1939                     (struct target_ucred *)target_data;
1940 
1941                 __put_user(cred->pid, &target_cred->pid);
1942                 __put_user(cred->uid, &target_cred->uid);
1943                 __put_user(cred->gid, &target_cred->gid);
1944                 break;
1945             }
1946             default:
1947                 goto unimplemented;
1948             }
1949             break;
1950 
1951         case SOL_IP:
1952             switch (cmsg->cmsg_type) {
1953             case IP_TTL:
1954             {
1955                 uint32_t *v = (uint32_t *)data;
1956                 uint32_t *t_int = (uint32_t *)target_data;
1957 
1958                 if (len != sizeof(uint32_t) ||
1959                     tgt_len != sizeof(uint32_t)) {
1960                     goto unimplemented;
1961                 }
1962                 __put_user(*v, t_int);
1963                 break;
1964             }
1965             case IP_RECVERR:
1966             {
1967                 struct errhdr_t {
1968                    struct sock_extended_err ee;
1969                    struct sockaddr_in offender;
1970                 };
1971                 struct errhdr_t *errh = (struct errhdr_t *)data;
1972                 struct errhdr_t *target_errh =
1973                     (struct errhdr_t *)target_data;
1974 
1975                 if (len != sizeof(struct errhdr_t) ||
1976                     tgt_len != sizeof(struct errhdr_t)) {
1977                     goto unimplemented;
1978                 }
1979                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1980                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1981                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1982                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1983                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1984                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1985                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1986                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1987                     (void *) &errh->offender, sizeof(errh->offender));
1988                 break;
1989             }
1990             default:
1991                 goto unimplemented;
1992             }
1993             break;
1994 
1995         case SOL_IPV6:
1996             switch (cmsg->cmsg_type) {
1997             case IPV6_HOPLIMIT:
1998             {
1999                 uint32_t *v = (uint32_t *)data;
2000                 uint32_t *t_int = (uint32_t *)target_data;
2001 
2002                 if (len != sizeof(uint32_t) ||
2003                     tgt_len != sizeof(uint32_t)) {
2004                     goto unimplemented;
2005                 }
2006                 __put_user(*v, t_int);
2007                 break;
2008             }
2009             case IPV6_RECVERR:
2010             {
2011                 struct errhdr6_t {
2012                    struct sock_extended_err ee;
2013                    struct sockaddr_in6 offender;
2014                 };
2015                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2016                 struct errhdr6_t *target_errh =
2017                     (struct errhdr6_t *)target_data;
2018 
2019                 if (len != sizeof(struct errhdr6_t) ||
2020                     tgt_len != sizeof(struct errhdr6_t)) {
2021                     goto unimplemented;
2022                 }
2023                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2024                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2025                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2026                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2027                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2028                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2029                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2030                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2031                     (void *) &errh->offender, sizeof(errh->offender));
2032                 break;
2033             }
2034             default:
2035                 goto unimplemented;
2036             }
2037             break;
2038 
2039         default:
2040         unimplemented:
2041             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2042                           cmsg->cmsg_level, cmsg->cmsg_type);
2043             memcpy(target_data, data, MIN(len, tgt_len));
2044             if (tgt_len > len) {
2045                 memset(target_data + len, 0, tgt_len - len);
2046             }
2047         }
2048 
2049         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2050         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2051         if (msg_controllen < tgt_space) {
2052             tgt_space = msg_controllen;
2053         }
2054         msg_controllen -= tgt_space;
2055         space += tgt_space;
2056         cmsg = CMSG_NXTHDR(msgh, cmsg);
2057         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2058                                          target_cmsg_start);
2059     }
2060     unlock_user(target_cmsg, target_cmsg_addr, space);
2061  the_end:
2062     target_msgh->msg_controllen = tswapal(space);
2063     return 0;
2064 }
2065 
2066 /* do_setsockopt() Must return target values and target errnos. */
2067 static abi_long do_setsockopt(int sockfd, int level, int optname,
2068                               abi_ulong optval_addr, socklen_t optlen)
2069 {
2070     abi_long ret;
2071     int val;
2072     struct ip_mreqn *ip_mreq;
2073     struct ip_mreq_source *ip_mreq_source;
2074 
2075     switch(level) {
2076     case SOL_TCP:
2077     case SOL_UDP:
2078         /* TCP and UDP options all take an 'int' value.  */
2079         if (optlen < sizeof(uint32_t))
2080             return -TARGET_EINVAL;
2081 
2082         if (get_user_u32(val, optval_addr))
2083             return -TARGET_EFAULT;
2084         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2085         break;
2086     case SOL_IP:
2087         switch(optname) {
2088         case IP_TOS:
2089         case IP_TTL:
2090         case IP_HDRINCL:
2091         case IP_ROUTER_ALERT:
2092         case IP_RECVOPTS:
2093         case IP_RETOPTS:
2094         case IP_PKTINFO:
2095         case IP_MTU_DISCOVER:
2096         case IP_RECVERR:
2097         case IP_RECVTTL:
2098         case IP_RECVTOS:
2099 #ifdef IP_FREEBIND
2100         case IP_FREEBIND:
2101 #endif
2102         case IP_MULTICAST_TTL:
2103         case IP_MULTICAST_LOOP:
2104             val = 0;
2105             if (optlen >= sizeof(uint32_t)) {
2106                 if (get_user_u32(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             } else if (optlen >= 1) {
2109                 if (get_user_u8(val, optval_addr))
2110                     return -TARGET_EFAULT;
2111             }
2112             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113             break;
2114         case IP_ADD_MEMBERSHIP:
2115         case IP_DROP_MEMBERSHIP:
2116             if (optlen < sizeof (struct target_ip_mreq) ||
2117                 optlen > sizeof (struct target_ip_mreqn))
2118                 return -TARGET_EINVAL;
2119 
2120             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2121             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2122             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2123             break;
2124 
2125         case IP_BLOCK_SOURCE:
2126         case IP_UNBLOCK_SOURCE:
2127         case IP_ADD_SOURCE_MEMBERSHIP:
2128         case IP_DROP_SOURCE_MEMBERSHIP:
2129             if (optlen != sizeof (struct target_ip_mreq_source))
2130                 return -TARGET_EINVAL;
2131 
2132             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2133             if (!ip_mreq_source) {
2134                 return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2137             unlock_user(ip_mreq_source, optval_addr, 0);
2138             break;
2139 
2140         default:
2141             goto unimplemented;
2142         }
2143         break;
2144     case SOL_IPV6:
2145         switch (optname) {
2146         case IPV6_MTU_DISCOVER:
2147         case IPV6_MTU:
2148         case IPV6_V6ONLY:
2149         case IPV6_RECVPKTINFO:
2150         case IPV6_UNICAST_HOPS:
2151         case IPV6_MULTICAST_HOPS:
2152         case IPV6_MULTICAST_LOOP:
2153         case IPV6_RECVERR:
2154         case IPV6_RECVHOPLIMIT:
2155         case IPV6_2292HOPLIMIT:
2156         case IPV6_CHECKSUM:
2157         case IPV6_ADDRFORM:
2158         case IPV6_2292PKTINFO:
2159         case IPV6_RECVTCLASS:
2160         case IPV6_RECVRTHDR:
2161         case IPV6_2292RTHDR:
2162         case IPV6_RECVHOPOPTS:
2163         case IPV6_2292HOPOPTS:
2164         case IPV6_RECVDSTOPTS:
2165         case IPV6_2292DSTOPTS:
2166         case IPV6_TCLASS:
2167         case IPV6_ADDR_PREFERENCES:
2168 #ifdef IPV6_RECVPATHMTU
2169         case IPV6_RECVPATHMTU:
2170 #endif
2171 #ifdef IPV6_TRANSPARENT
2172         case IPV6_TRANSPARENT:
2173 #endif
2174 #ifdef IPV6_FREEBIND
2175         case IPV6_FREEBIND:
2176 #endif
2177 #ifdef IPV6_RECVORIGDSTADDR
2178         case IPV6_RECVORIGDSTADDR:
2179 #endif
2180             val = 0;
2181             if (optlen < sizeof(uint32_t)) {
2182                 return -TARGET_EINVAL;
2183             }
2184             if (get_user_u32(val, optval_addr)) {
2185                 return -TARGET_EFAULT;
2186             }
2187             ret = get_errno(setsockopt(sockfd, level, optname,
2188                                        &val, sizeof(val)));
2189             break;
2190         case IPV6_PKTINFO:
2191         {
2192             struct in6_pktinfo pki;
2193 
2194             if (optlen < sizeof(pki)) {
2195                 return -TARGET_EINVAL;
2196             }
2197 
2198             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2199                 return -TARGET_EFAULT;
2200             }
2201 
2202             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2203 
2204             ret = get_errno(setsockopt(sockfd, level, optname,
2205                                        &pki, sizeof(pki)));
2206             break;
2207         }
2208         case IPV6_ADD_MEMBERSHIP:
2209         case IPV6_DROP_MEMBERSHIP:
2210         {
2211             struct ipv6_mreq ipv6mreq;
2212 
2213             if (optlen < sizeof(ipv6mreq)) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2218                 return -TARGET_EFAULT;
2219             }
2220 
2221             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2222 
2223             ret = get_errno(setsockopt(sockfd, level, optname,
2224                                        &ipv6mreq, sizeof(ipv6mreq)));
2225             break;
2226         }
2227         default:
2228             goto unimplemented;
2229         }
2230         break;
2231     case SOL_ICMPV6:
2232         switch (optname) {
2233         case ICMPV6_FILTER:
2234         {
2235             struct icmp6_filter icmp6f;
2236 
2237             if (optlen > sizeof(icmp6f)) {
2238                 optlen = sizeof(icmp6f);
2239             }
2240 
2241             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2242                 return -TARGET_EFAULT;
2243             }
2244 
2245             for (val = 0; val < 8; val++) {
2246                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2247             }
2248 
2249             ret = get_errno(setsockopt(sockfd, level, optname,
2250                                        &icmp6f, optlen));
2251             break;
2252         }
2253         default:
2254             goto unimplemented;
2255         }
2256         break;
2257     case SOL_RAW:
2258         switch (optname) {
2259         case ICMP_FILTER:
2260         case IPV6_CHECKSUM:
2261             /* These take a u32 value */
2262             if (optlen < sizeof(uint32_t)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (get_user_u32(val, optval_addr)) {
2267                 return -TARGET_EFAULT;
2268             }
2269             ret = get_errno(setsockopt(sockfd, level, optname,
2270                                        &val, sizeof(val)));
2271             break;
2272 
2273         default:
2274             goto unimplemented;
2275         }
2276         break;
2277 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2278     case SOL_ALG:
2279         switch (optname) {
2280         case ALG_SET_KEY:
2281         {
2282             char *alg_key = g_malloc(optlen);
2283 
2284             if (!alg_key) {
2285                 return -TARGET_ENOMEM;
2286             }
2287             if (copy_from_user(alg_key, optval_addr, optlen)) {
2288                 g_free(alg_key);
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        alg_key, optlen));
2293             g_free(alg_key);
2294             break;
2295         }
2296         case ALG_SET_AEAD_AUTHSIZE:
2297         {
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        NULL, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #endif
2307     case TARGET_SOL_SOCKET:
2308         switch (optname) {
2309         case TARGET_SO_RCVTIMEO:
2310         {
2311                 struct timeval tv;
2312 
2313                 optname = SO_RCVTIMEO;
2314 
2315 set_timeout:
2316                 if (optlen != sizeof(struct target_timeval)) {
2317                     return -TARGET_EINVAL;
2318                 }
2319 
2320                 if (copy_from_user_timeval(&tv, optval_addr)) {
2321                     return -TARGET_EFAULT;
2322                 }
2323 
2324                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2325                                 &tv, sizeof(tv)));
2326                 return ret;
2327         }
2328         case TARGET_SO_SNDTIMEO:
2329                 optname = SO_SNDTIMEO;
2330                 goto set_timeout;
2331         case TARGET_SO_ATTACH_FILTER:
2332         {
2333                 struct target_sock_fprog *tfprog;
2334                 struct target_sock_filter *tfilter;
2335                 struct sock_fprog fprog;
2336                 struct sock_filter *filter;
2337                 int i;
2338 
2339                 if (optlen != sizeof(*tfprog)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345                 if (!lock_user_struct(VERIFY_READ, tfilter,
2346                                       tswapal(tfprog->filter), 0)) {
2347                     unlock_user_struct(tfprog, optval_addr, 1);
2348                     return -TARGET_EFAULT;
2349                 }
2350 
2351                 fprog.len = tswap16(tfprog->len);
2352                 filter = g_try_new(struct sock_filter, fprog.len);
2353                 if (filter == NULL) {
2354                     unlock_user_struct(tfilter, tfprog->filter, 1);
2355                     unlock_user_struct(tfprog, optval_addr, 1);
2356                     return -TARGET_ENOMEM;
2357                 }
2358                 for (i = 0; i < fprog.len; i++) {
2359                     filter[i].code = tswap16(tfilter[i].code);
2360                     filter[i].jt = tfilter[i].jt;
2361                     filter[i].jf = tfilter[i].jf;
2362                     filter[i].k = tswap32(tfilter[i].k);
2363                 }
2364                 fprog.filter = filter;
2365 
2366                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2367                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2368                 g_free(filter);
2369 
2370                 unlock_user_struct(tfilter, tfprog->filter, 1);
2371                 unlock_user_struct(tfprog, optval_addr, 1);
2372                 return ret;
2373         }
2374         case TARGET_SO_BINDTODEVICE:
2375         {
2376                 char *dev_ifname, *addr_ifname;
2377 
2378                 if (optlen > IFNAMSIZ - 1) {
2379                     optlen = IFNAMSIZ - 1;
2380                 }
2381                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2382                 if (!dev_ifname) {
2383                     return -TARGET_EFAULT;
2384                 }
2385                 optname = SO_BINDTODEVICE;
2386                 addr_ifname = alloca(IFNAMSIZ);
2387                 memcpy(addr_ifname, dev_ifname, optlen);
2388                 addr_ifname[optlen] = 0;
2389                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2390                                            addr_ifname, optlen));
2391                 unlock_user(dev_ifname, optval_addr, 0);
2392                 return ret;
2393         }
2394         case TARGET_SO_LINGER:
2395         {
2396                 struct linger lg;
2397                 struct target_linger *tlg;
2398 
2399                 if (optlen != sizeof(struct target_linger)) {
2400                     return -TARGET_EINVAL;
2401                 }
2402                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 __get_user(lg.l_onoff, &tlg->l_onoff);
2406                 __get_user(lg.l_linger, &tlg->l_linger);
2407                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2408                                 &lg, sizeof(lg)));
2409                 unlock_user_struct(tlg, optval_addr, 0);
2410                 return ret;
2411         }
2412             /* Options with 'int' argument.  */
2413         case TARGET_SO_DEBUG:
2414                 optname = SO_DEBUG;
2415                 break;
2416         case TARGET_SO_REUSEADDR:
2417                 optname = SO_REUSEADDR;
2418                 break;
2419 #ifdef SO_REUSEPORT
2420         case TARGET_SO_REUSEPORT:
2421                 optname = SO_REUSEPORT;
2422                 break;
2423 #endif
2424         case TARGET_SO_TYPE:
2425                 optname = SO_TYPE;
2426                 break;
2427         case TARGET_SO_ERROR:
2428                 optname = SO_ERROR;
2429                 break;
2430         case TARGET_SO_DONTROUTE:
2431                 optname = SO_DONTROUTE;
2432                 break;
2433         case TARGET_SO_BROADCAST:
2434                 optname = SO_BROADCAST;
2435                 break;
2436         case TARGET_SO_SNDBUF:
2437                 optname = SO_SNDBUF;
2438                 break;
2439         case TARGET_SO_SNDBUFFORCE:
2440                 optname = SO_SNDBUFFORCE;
2441                 break;
2442         case TARGET_SO_RCVBUF:
2443                 optname = SO_RCVBUF;
2444                 break;
2445         case TARGET_SO_RCVBUFFORCE:
2446                 optname = SO_RCVBUFFORCE;
2447                 break;
2448         case TARGET_SO_KEEPALIVE:
2449                 optname = SO_KEEPALIVE;
2450                 break;
2451         case TARGET_SO_OOBINLINE:
2452                 optname = SO_OOBINLINE;
2453                 break;
2454         case TARGET_SO_NO_CHECK:
2455                 optname = SO_NO_CHECK;
2456                 break;
2457         case TARGET_SO_PRIORITY:
2458                 optname = SO_PRIORITY;
2459                 break;
2460 #ifdef SO_BSDCOMPAT
2461         case TARGET_SO_BSDCOMPAT:
2462                 optname = SO_BSDCOMPAT;
2463                 break;
2464 #endif
2465         case TARGET_SO_PASSCRED:
2466                 optname = SO_PASSCRED;
2467                 break;
2468         case TARGET_SO_PASSSEC:
2469                 optname = SO_PASSSEC;
2470                 break;
2471         case TARGET_SO_TIMESTAMP:
2472                 optname = SO_TIMESTAMP;
2473                 break;
2474         case TARGET_SO_RCVLOWAT:
2475                 optname = SO_RCVLOWAT;
2476                 break;
2477         default:
2478             goto unimplemented;
2479         }
2480         if (optlen < sizeof(uint32_t))
2481             return -TARGET_EINVAL;
2482 
2483         if (get_user_u32(val, optval_addr))
2484             return -TARGET_EFAULT;
2485         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2486         break;
2487 #ifdef SOL_NETLINK
2488     case SOL_NETLINK:
2489         switch (optname) {
2490         case NETLINK_PKTINFO:
2491         case NETLINK_ADD_MEMBERSHIP:
2492         case NETLINK_DROP_MEMBERSHIP:
2493         case NETLINK_BROADCAST_ERROR:
2494         case NETLINK_NO_ENOBUFS:
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2496         case NETLINK_LISTEN_ALL_NSID:
2497         case NETLINK_CAP_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2500         case NETLINK_EXT_ACK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2502 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2503         case NETLINK_GET_STRICT_CHK:
2504 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2505             break;
2506         default:
2507             goto unimplemented;
2508         }
2509         val = 0;
2510         if (optlen < sizeof(uint32_t)) {
2511             return -TARGET_EINVAL;
2512         }
2513         if (get_user_u32(val, optval_addr)) {
2514             return -TARGET_EFAULT;
2515         }
2516         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2517                                    sizeof(val)));
2518         break;
2519 #endif /* SOL_NETLINK */
2520     default:
2521     unimplemented:
2522         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2523                       level, optname);
2524         ret = -TARGET_ENOPROTOOPT;
2525     }
2526     return ret;
2527 }
2528 
2529 /* do_getsockopt() Must return target values and target errnos. */
2530 static abi_long do_getsockopt(int sockfd, int level, int optname,
2531                               abi_ulong optval_addr, abi_ulong optlen)
2532 {
2533     abi_long ret;
2534     int len, val;
2535     socklen_t lv;
2536 
2537     switch(level) {
2538     case TARGET_SOL_SOCKET:
2539         level = SOL_SOCKET;
2540         switch (optname) {
2541         /* These don't just return a single integer */
2542         case TARGET_SO_PEERNAME:
2543             goto unimplemented;
2544         case TARGET_SO_RCVTIMEO: {
2545             struct timeval tv;
2546             socklen_t tvlen;
2547 
2548             optname = SO_RCVTIMEO;
2549 
2550 get_timeout:
2551             if (get_user_u32(len, optlen)) {
2552                 return -TARGET_EFAULT;
2553             }
2554             if (len < 0) {
2555                 return -TARGET_EINVAL;
2556             }
2557 
2558             tvlen = sizeof(tv);
2559             ret = get_errno(getsockopt(sockfd, level, optname,
2560                                        &tv, &tvlen));
2561             if (ret < 0) {
2562                 return ret;
2563             }
2564             if (len > sizeof(struct target_timeval)) {
2565                 len = sizeof(struct target_timeval);
2566             }
2567             if (copy_to_user_timeval(optval_addr, &tv)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             if (put_user_u32(len, optlen)) {
2571                 return -TARGET_EFAULT;
2572             }
2573             break;
2574         }
2575         case TARGET_SO_SNDTIMEO:
2576             optname = SO_SNDTIMEO;
2577             goto get_timeout;
2578         case TARGET_SO_PEERCRED: {
2579             struct ucred cr;
2580             socklen_t crlen;
2581             struct target_ucred *tcr;
2582 
2583             if (get_user_u32(len, optlen)) {
2584                 return -TARGET_EFAULT;
2585             }
2586             if (len < 0) {
2587                 return -TARGET_EINVAL;
2588             }
2589 
2590             crlen = sizeof(cr);
2591             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2592                                        &cr, &crlen));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (len > crlen) {
2597                 len = crlen;
2598             }
2599             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             __put_user(cr.pid, &tcr->pid);
2603             __put_user(cr.uid, &tcr->uid);
2604             __put_user(cr.gid, &tcr->gid);
2605             unlock_user_struct(tcr, optval_addr, 1);
2606             if (put_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             break;
2610         }
2611         case TARGET_SO_PEERSEC: {
2612             char *name;
2613 
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len < 0) {
2618                 return -TARGET_EINVAL;
2619             }
2620             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2621             if (!name) {
2622                 return -TARGET_EFAULT;
2623             }
2624             lv = len;
2625             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2626                                        name, &lv));
2627             if (put_user_u32(lv, optlen)) {
2628                 ret = -TARGET_EFAULT;
2629             }
2630             unlock_user(name, optval_addr, lv);
2631             break;
2632         }
2633         case TARGET_SO_LINGER:
2634         {
2635             struct linger lg;
2636             socklen_t lglen;
2637             struct target_linger *tlg;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             lglen = sizeof(lg);
2647             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2648                                        &lg, &lglen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > lglen) {
2653                 len = lglen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(lg.l_onoff, &tlg->l_onoff);
2659             __put_user(lg.l_linger, &tlg->l_linger);
2660             unlock_user_struct(tlg, optval_addr, 1);
2661             if (put_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             break;
2665         }
2666         /* Options with 'int' argument.  */
2667         case TARGET_SO_DEBUG:
2668             optname = SO_DEBUG;
2669             goto int_case;
2670         case TARGET_SO_REUSEADDR:
2671             optname = SO_REUSEADDR;
2672             goto int_case;
2673 #ifdef SO_REUSEPORT
2674         case TARGET_SO_REUSEPORT:
2675             optname = SO_REUSEPORT;
2676             goto int_case;
2677 #endif
2678         case TARGET_SO_TYPE:
2679             optname = SO_TYPE;
2680             goto int_case;
2681         case TARGET_SO_ERROR:
2682             optname = SO_ERROR;
2683             goto int_case;
2684         case TARGET_SO_DONTROUTE:
2685             optname = SO_DONTROUTE;
2686             goto int_case;
2687         case TARGET_SO_BROADCAST:
2688             optname = SO_BROADCAST;
2689             goto int_case;
2690         case TARGET_SO_SNDBUF:
2691             optname = SO_SNDBUF;
2692             goto int_case;
2693         case TARGET_SO_RCVBUF:
2694             optname = SO_RCVBUF;
2695             goto int_case;
2696         case TARGET_SO_KEEPALIVE:
2697             optname = SO_KEEPALIVE;
2698             goto int_case;
2699         case TARGET_SO_OOBINLINE:
2700             optname = SO_OOBINLINE;
2701             goto int_case;
2702         case TARGET_SO_NO_CHECK:
2703             optname = SO_NO_CHECK;
2704             goto int_case;
2705         case TARGET_SO_PRIORITY:
2706             optname = SO_PRIORITY;
2707             goto int_case;
2708 #ifdef SO_BSDCOMPAT
2709         case TARGET_SO_BSDCOMPAT:
2710             optname = SO_BSDCOMPAT;
2711             goto int_case;
2712 #endif
2713         case TARGET_SO_PASSCRED:
2714             optname = SO_PASSCRED;
2715             goto int_case;
2716         case TARGET_SO_TIMESTAMP:
2717             optname = SO_TIMESTAMP;
2718             goto int_case;
2719         case TARGET_SO_RCVLOWAT:
2720             optname = SO_RCVLOWAT;
2721             goto int_case;
2722         case TARGET_SO_ACCEPTCONN:
2723             optname = SO_ACCEPTCONN;
2724             goto int_case;
2725         case TARGET_SO_PROTOCOL:
2726             optname = SO_PROTOCOL;
2727             goto int_case;
2728         case TARGET_SO_DOMAIN:
2729             optname = SO_DOMAIN;
2730             goto int_case;
2731         default:
2732             goto int_case;
2733         }
2734         break;
2735     case SOL_TCP:
2736     case SOL_UDP:
2737         /* TCP and UDP options all take an 'int' value.  */
2738     int_case:
2739         if (get_user_u32(len, optlen))
2740             return -TARGET_EFAULT;
2741         if (len < 0)
2742             return -TARGET_EINVAL;
2743         lv = sizeof(lv);
2744         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2745         if (ret < 0)
2746             return ret;
2747         if (optname == SO_TYPE) {
2748             val = host_to_target_sock_type(val);
2749         }
2750         if (len > lv)
2751             len = lv;
2752         if (len == 4) {
2753             if (put_user_u32(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         } else {
2756             if (put_user_u8(val, optval_addr))
2757                 return -TARGET_EFAULT;
2758         }
2759         if (put_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         break;
2762     case SOL_IP:
2763         switch(optname) {
2764         case IP_TOS:
2765         case IP_TTL:
2766         case IP_HDRINCL:
2767         case IP_ROUTER_ALERT:
2768         case IP_RECVOPTS:
2769         case IP_RETOPTS:
2770         case IP_PKTINFO:
2771         case IP_MTU_DISCOVER:
2772         case IP_RECVERR:
2773         case IP_RECVTOS:
2774 #ifdef IP_FREEBIND
2775         case IP_FREEBIND:
2776 #endif
2777         case IP_MULTICAST_TTL:
2778         case IP_MULTICAST_LOOP:
2779             if (get_user_u32(len, optlen))
2780                 return -TARGET_EFAULT;
2781             if (len < 0)
2782                 return -TARGET_EINVAL;
2783             lv = sizeof(lv);
2784             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785             if (ret < 0)
2786                 return ret;
2787             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2788                 len = 1;
2789                 if (put_user_u32(len, optlen)
2790                     || put_user_u8(val, optval_addr))
2791                     return -TARGET_EFAULT;
2792             } else {
2793                 if (len > sizeof(int))
2794                     len = sizeof(int);
2795                 if (put_user_u32(len, optlen)
2796                     || put_user_u32(val, optval_addr))
2797                     return -TARGET_EFAULT;
2798             }
2799             break;
2800         default:
2801             ret = -TARGET_ENOPROTOOPT;
2802             break;
2803         }
2804         break;
2805     case SOL_IPV6:
2806         switch (optname) {
2807         case IPV6_MTU_DISCOVER:
2808         case IPV6_MTU:
2809         case IPV6_V6ONLY:
2810         case IPV6_RECVPKTINFO:
2811         case IPV6_UNICAST_HOPS:
2812         case IPV6_MULTICAST_HOPS:
2813         case IPV6_MULTICAST_LOOP:
2814         case IPV6_RECVERR:
2815         case IPV6_RECVHOPLIMIT:
2816         case IPV6_2292HOPLIMIT:
2817         case IPV6_CHECKSUM:
2818         case IPV6_ADDRFORM:
2819         case IPV6_2292PKTINFO:
2820         case IPV6_RECVTCLASS:
2821         case IPV6_RECVRTHDR:
2822         case IPV6_2292RTHDR:
2823         case IPV6_RECVHOPOPTS:
2824         case IPV6_2292HOPOPTS:
2825         case IPV6_RECVDSTOPTS:
2826         case IPV6_2292DSTOPTS:
2827         case IPV6_TCLASS:
2828         case IPV6_ADDR_PREFERENCES:
2829 #ifdef IPV6_RECVPATHMTU
2830         case IPV6_RECVPATHMTU:
2831 #endif
2832 #ifdef IPV6_TRANSPARENT
2833         case IPV6_TRANSPARENT:
2834 #endif
2835 #ifdef IPV6_FREEBIND
2836         case IPV6_FREEBIND:
2837 #endif
2838 #ifdef IPV6_RECVORIGDSTADDR
2839         case IPV6_RECVORIGDSTADDR:
2840 #endif
2841             if (get_user_u32(len, optlen))
2842                 return -TARGET_EFAULT;
2843             if (len < 0)
2844                 return -TARGET_EINVAL;
2845             lv = sizeof(lv);
2846             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2847             if (ret < 0)
2848                 return ret;
2849             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2850                 len = 1;
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u8(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             } else {
2855                 if (len > sizeof(int))
2856                     len = sizeof(int);
2857                 if (put_user_u32(len, optlen)
2858                     || put_user_u32(val, optval_addr))
2859                     return -TARGET_EFAULT;
2860             }
2861             break;
2862         default:
2863             ret = -TARGET_ENOPROTOOPT;
2864             break;
2865         }
2866         break;
2867 #ifdef SOL_NETLINK
2868     case SOL_NETLINK:
2869         switch (optname) {
2870         case NETLINK_PKTINFO:
2871         case NETLINK_BROADCAST_ERROR:
2872         case NETLINK_NO_ENOBUFS:
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2874         case NETLINK_LISTEN_ALL_NSID:
2875         case NETLINK_CAP_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2878         case NETLINK_EXT_ACK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2881         case NETLINK_GET_STRICT_CHK:
2882 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2883             if (get_user_u32(len, optlen)) {
2884                 return -TARGET_EFAULT;
2885             }
2886             if (len != sizeof(val)) {
2887                 return -TARGET_EINVAL;
2888             }
2889             lv = len;
2890             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2891             if (ret < 0) {
2892                 return ret;
2893             }
2894             if (put_user_u32(lv, optlen)
2895                 || put_user_u32(val, optval_addr)) {
2896                 return -TARGET_EFAULT;
2897             }
2898             break;
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LIST_MEMBERSHIPS:
2901         {
2902             uint32_t *results;
2903             int i;
2904             if (get_user_u32(len, optlen)) {
2905                 return -TARGET_EFAULT;
2906             }
2907             if (len < 0) {
2908                 return -TARGET_EINVAL;
2909             }
2910             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2911             if (!results && len > 0) {
2912                 return -TARGET_EFAULT;
2913             }
2914             lv = len;
2915             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2916             if (ret < 0) {
2917                 unlock_user(results, optval_addr, 0);
2918                 return ret;
2919             }
2920             /* Swap host endianness to target endianness. */
2921             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2922                 results[i] = tswap32(results[i]);
2923             }
2924             if (put_user_u32(lv, optlen)) {
2925                 return -TARGET_EFAULT;
2926             }
2927             unlock_user(results, optval_addr, 0);
2928             break;
2929         }
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931         default:
2932             goto unimplemented;
2933         }
2934         break;
2935 #endif /* SOL_NETLINK */
2936     default:
2937     unimplemented:
2938         qemu_log_mask(LOG_UNIMP,
2939                       "getsockopt level=%d optname=%d not yet supported\n",
2940                       level, optname);
2941         ret = -TARGET_EOPNOTSUPP;
2942         break;
2943     }
2944     return ret;
2945 }
2946 
2947 /* Convert target low/high pair representing file offset into the host
2948  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2949  * as the kernel doesn't handle them either.
2950  */
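/*
 * For example, on a 32-bit target running on a 64-bit host, tlow=0x1000
 * and thigh=0x1 combine to the 64-bit offset 0x100001000, which is
 * returned as *hlow=0x100001000 and *hhigh=0.
 */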
2951 static void target_to_host_low_high(abi_ulong tlow,
2952                                     abi_ulong thigh,
2953                                     unsigned long *hlow,
2954                                     unsigned long *hhigh)
2955 {
2956     uint64_t off = tlow |
2957         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2958         TARGET_LONG_BITS / 2;
2959 
2960     *hlow = off;
2961     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2962 }
2963 
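/*
 * Build a host iovec array from the guest iovec array at 'target_addr',
 * locking each guest buffer.  On failure this returns NULL with errno set;
 * a bad buffer address after the first entry is replaced by a zero-length
 * entry so the syscall can still perform a partial transfer.
 */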
2964 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2965                                 abi_ulong count, int copy)
2966 {
2967     struct target_iovec *target_vec;
2968     struct iovec *vec;
2969     abi_ulong total_len, max_len;
2970     int i;
2971     int err = 0;
2972     bool bad_address = false;
2973 
2974     if (count == 0) {
2975         errno = 0;
2976         return NULL;
2977     }
2978     if (count > IOV_MAX) {
2979         errno = EINVAL;
2980         return NULL;
2981     }
2982 
2983     vec = g_try_new0(struct iovec, count);
2984     if (vec == NULL) {
2985         errno = ENOMEM;
2986         return NULL;
2987     }
2988 
2989     target_vec = lock_user(VERIFY_READ, target_addr,
2990                            count * sizeof(struct target_iovec), 1);
2991     if (target_vec == NULL) {
2992         err = EFAULT;
2993         goto fail2;
2994     }
2995 
2996     /* ??? If host page size > target page size, this will result in a
2997        value larger than what we can actually support.  */
2998     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2999     total_len = 0;
3000 
3001     for (i = 0; i < count; i++) {
3002         abi_ulong base = tswapal(target_vec[i].iov_base);
3003         abi_long len = tswapal(target_vec[i].iov_len);
3004 
3005         if (len < 0) {
3006             err = EINVAL;
3007             goto fail;
3008         } else if (len == 0) {
3009             /* Zero length pointer is ignored.  */
3010             vec[i].iov_base = 0;
3011         } else {
3012             vec[i].iov_base = lock_user(type, base, len, copy);
3013             /* If the first buffer pointer is bad, this is a fault.  But
3014              * subsequent bad buffers will result in a partial write; this
3015              * is realized by filling the vector with null pointers and
3016              * zero lengths. */
3017             if (!vec[i].iov_base) {
3018                 if (i == 0) {
3019                     err = EFAULT;
3020                     goto fail;
3021                 } else {
3022                     bad_address = true;
3023                 }
3024             }
3025             if (bad_address) {
3026                 len = 0;
3027             }
3028             if (len > max_len - total_len) {
3029                 len = max_len - total_len;
3030             }
3031         }
3032         vec[i].iov_len = len;
3033         total_len += len;
3034     }
3035 
3036     unlock_user(target_vec, target_addr, 0);
3037     return vec;
3038 
3039  fail:
3040     while (--i >= 0) {
3041         if (tswapal(target_vec[i].iov_len) > 0) {
3042             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3043         }
3044     }
3045     unlock_user(target_vec, target_addr, 0);
3046  fail2:
3047     g_free(vec);
3048     errno = err;
3049     return NULL;
3050 }
3051 
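/*
 * Undo lock_iovec(): release every guest buffer, copying data back to the
 * guest when 'copy' is set, and free the host iovec array.
 */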
3052 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3053                          abi_ulong count, int copy)
3054 {
3055     struct target_iovec *target_vec;
3056     int i;
3057 
3058     target_vec = lock_user(VERIFY_READ, target_addr,
3059                            count * sizeof(struct target_iovec), 1);
3060     if (target_vec) {
3061         for (i = 0; i < count; i++) {
3062             abi_ulong base = tswapal(target_vec[i].iov_base);
3063             abi_long len = tswapal(target_vec[i].iov_len);
3064             if (len < 0) {
3065                 break;
3066             }
3067             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3068         }
3069         unlock_user(target_vec, target_addr, 0);
3070     }
3071 
3072     g_free(vec);
3073 }
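
/*
 * Illustrative sketch, not part of the original source: the usual way a
 * vectored-I/O wrapper consumes lock_iovec()/unlock_iovec() above.  The
 * function name is hypothetical; "vec_addr"/"count" stand for the guest's
 * iovec pointer and element count, and safe_writev() stands for the
 * signal-safe host writev wrapper assumed to exist elsewhere in this file.
 */
#if 0
static abi_long example_do_writev(int fd, abi_ulong vec_addr, abi_ulong count)
{
    abi_long ret;
    /* copy = 1: the guest buffers are inputs, so copy them in */
    struct iovec *vec = lock_iovec(VERIFY_READ, vec_addr, count, 1);

    if (vec != NULL) {
        ret = get_errno(safe_writev(fd, vec, count));
        /* copy = 0: nothing to write back to the guest buffers */
        unlock_iovec(vec, vec_addr, count, 0);
    } else {
        ret = -host_to_target_errno(errno);  /* lock_iovec() set errno */
    }
    return ret;
}
#endif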
3074 
3075 static inline int target_to_host_sock_type(int *type)
3076 {
3077     int host_type = 0;
3078     int target_type = *type;
3079 
3080     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3081     case TARGET_SOCK_DGRAM:
3082         host_type = SOCK_DGRAM;
3083         break;
3084     case TARGET_SOCK_STREAM:
3085         host_type = SOCK_STREAM;
3086         break;
3087     default:
3088         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3089         break;
3090     }
3091     if (target_type & TARGET_SOCK_CLOEXEC) {
3092 #if defined(SOCK_CLOEXEC)
3093         host_type |= SOCK_CLOEXEC;
3094 #else
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     if (target_type & TARGET_SOCK_NONBLOCK) {
3099 #if defined(SOCK_NONBLOCK)
3100         host_type |= SOCK_NONBLOCK;
3101 #elif !defined(O_NONBLOCK)
3102         return -TARGET_EINVAL;
3103 #endif
3104     }
3105     *type = host_type;
3106     return 0;
3107 }
3108 
3109 /* Try to emulate socket type flags after socket creation.  */
3110 static int sock_flags_fixup(int fd, int target_type)
3111 {
3112 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3113     if (target_type & TARGET_SOCK_NONBLOCK) {
3114         int flags = fcntl(fd, F_GETFL);
3115         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3116             close(fd);
3117             return -TARGET_EINVAL;
3118         }
3119     }
3120 #endif
3121     return fd;
3122 }
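
/*
 * Illustrative sketch, an assumption rather than original source: what the
 * fcntl() fallback above amounts to on a host without SOCK_NONBLOCK, for a
 * guest that asked for a non-blocking datagram socket.
 */
#if 0
int fd = socket(AF_INET, SOCK_DGRAM, 0);     /* type flag already stripped */
int flags = fcntl(fd, F_GETFL);
fcntl(fd, F_SETFL, flags | O_NONBLOCK);      /* emulates SOCK_NONBLOCK */
#endif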
3123 
3124 /* do_socket() Must return target values and target errnos. */
3125 static abi_long do_socket(int domain, int type, int protocol)
3126 {
3127     int target_type = type;
3128     int ret;
3129 
3130     ret = target_to_host_sock_type(&type);
3131     if (ret) {
3132         return ret;
3133     }
3134 
3135     if (domain == PF_NETLINK && !(
3136 #ifdef CONFIG_RTNETLINK
3137          protocol == NETLINK_ROUTE ||
3138 #endif
3139          protocol == NETLINK_KOBJECT_UEVENT ||
3140          protocol == NETLINK_AUDIT)) {
3141         return -TARGET_EPROTONOSUPPORT;
3142     }
3143 
3144     if (domain == AF_PACKET ||
3145         (domain == AF_INET && type == SOCK_PACKET)) {
3146         protocol = tswap16(protocol);
3147     }
3148 
3149     ret = get_errno(socket(domain, type, protocol));
3150     if (ret >= 0) {
3151         ret = sock_flags_fixup(ret, target_type);
3152         if (type == SOCK_PACKET) {
3153             /* Handle the obsolete SOCK_PACKET case:
3154              * such sockets are bound by device name.
3155              */
3156             fd_trans_register(ret, &target_packet_trans);
3157         } else if (domain == PF_NETLINK) {
3158             switch (protocol) {
3159 #ifdef CONFIG_RTNETLINK
3160             case NETLINK_ROUTE:
3161                 fd_trans_register(ret, &target_netlink_route_trans);
3162                 break;
3163 #endif
3164             case NETLINK_KOBJECT_UEVENT:
3165                 /* nothing to do: messages are strings */
3166                 break;
3167             case NETLINK_AUDIT:
3168                 fd_trans_register(ret, &target_netlink_audit_trans);
3169                 break;
3170             default:
3171                 g_assert_not_reached();
3172             }
3173         }
3174     }
3175     return ret;
3176 }
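
/*
 * Illustrative guest-side examples (a sketch, not from the original source)
 * of the netlink filtering in do_socket() above: NETLINK_ROUTE is passed
 * through when CONFIG_RTNETLINK is available, while a protocol with no
 * translator, such as NETLINK_NETFILTER, is refused with EPROTONOSUPPORT.
 */
#if 0
int ok  = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);     /* emulated */
int bad = socket(AF_NETLINK, SOCK_RAW, NETLINK_NETFILTER); /* rejected */
#endif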
3177 
3178 /* do_bind() Must return target values and target errnos. */
3179 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3180                         socklen_t addrlen)
3181 {
3182     void *addr;
3183     abi_long ret;
3184 
3185     if ((int)addrlen < 0) {
3186         return -TARGET_EINVAL;
3187     }
3188 
3189     addr = alloca(addrlen+1);
3190 
3191     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3192     if (ret)
3193         return ret;
3194 
3195     return get_errno(bind(sockfd, addr, addrlen));
3196 }
3197 
3198 /* do_connect() Must return target values and target errnos. */
3199 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3200                            socklen_t addrlen)
3201 {
3202     void *addr;
3203     abi_long ret;
3204 
3205     if ((int)addrlen < 0) {
3206         return -TARGET_EINVAL;
3207     }
3208 
3209     addr = alloca(addrlen+1);
3210 
3211     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212     if (ret)
3213         return ret;
3214 
3215     return get_errno(safe_connect(sockfd, addr, addrlen));
3216 }
3217 
3218 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3219 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3220                                       int flags, int send)
3221 {
3222     abi_long ret, len;
3223     struct msghdr msg;
3224     abi_ulong count;
3225     struct iovec *vec;
3226     abi_ulong target_vec;
3227 
3228     if (msgp->msg_name) {
3229         msg.msg_namelen = tswap32(msgp->msg_namelen);
3230         msg.msg_name = alloca(msg.msg_namelen+1);
3231         ret = target_to_host_sockaddr(fd, msg.msg_name,
3232                                       tswapal(msgp->msg_name),
3233                                       msg.msg_namelen);
3234         if (ret == -TARGET_EFAULT) {
3235             /* For connected sockets msg_name and msg_namelen must
3236              * be ignored, so returning EFAULT immediately is wrong.
3237              * Instead, pass a bad msg_name to the host kernel, and
3238              * let it decide whether to return EFAULT or not.
3239              */
3240             msg.msg_name = (void *)-1;
3241         } else if (ret) {
3242             goto out2;
3243         }
3244     } else {
3245         msg.msg_name = NULL;
3246         msg.msg_namelen = 0;
3247     }
3248     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3249     msg.msg_control = alloca(msg.msg_controllen);
3250     memset(msg.msg_control, 0, msg.msg_controllen);
3251 
3252     msg.msg_flags = tswap32(msgp->msg_flags);
3253 
3254     count = tswapal(msgp->msg_iovlen);
3255     target_vec = tswapal(msgp->msg_iov);
3256 
3257     if (count > IOV_MAX) {
3258         /* sendmsg/recvmsg return a different errno for this condition than
3259          * readv/writev, so we must catch it here before lock_iovec() does.
3260          */
3261         ret = -TARGET_EMSGSIZE;
3262         goto out2;
3263     }
3264 
3265     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3266                      target_vec, count, send);
3267     if (vec == NULL) {
3268         ret = -host_to_target_errno(errno);
3269         goto out2;
3270     }
3271     msg.msg_iovlen = count;
3272     msg.msg_iov = vec;
3273 
3274     if (send) {
3275         if (fd_trans_target_to_host_data(fd)) {
3276             void *host_msg;
3277 
3278             host_msg = g_malloc(msg.msg_iov->iov_len);
3279             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3280             ret = fd_trans_target_to_host_data(fd)(host_msg,
3281                                                    msg.msg_iov->iov_len);
3282             if (ret >= 0) {
3283                 msg.msg_iov->iov_base = host_msg;
3284                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3285             }
3286             g_free(host_msg);
3287         } else {
3288             ret = target_to_host_cmsg(&msg, msgp);
3289             if (ret == 0) {
3290                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3291             }
3292         }
3293     } else {
3294         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3295         if (!is_error(ret)) {
3296             len = ret;
3297             if (fd_trans_host_to_target_data(fd)) {
3298                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3299                                                MIN(msg.msg_iov->iov_len, len));
3300             } else {
3301                 ret = host_to_target_cmsg(msgp, &msg);
3302             }
3303             if (!is_error(ret)) {
3304                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3305                 msgp->msg_flags = tswap32(msg.msg_flags);
3306                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3307                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3308                                     msg.msg_name, msg.msg_namelen);
3309                     if (ret) {
3310                         goto out;
3311                     }
3312                 }
3313 
3314                 ret = len;
3315             }
3316         }
3317     }
3318 
3319 out:
3320     unlock_iovec(vec, target_vec, count, !send);
3321 out2:
3322     return ret;
3323 }
3324 
3325 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3326                                int flags, int send)
3327 {
3328     abi_long ret;
3329     struct target_msghdr *msgp;
3330 
3331     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3332                           msgp,
3333                           target_msg,
3334                           send ? 1 : 0)) {
3335         return -TARGET_EFAULT;
3336     }
3337     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3338     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3339     return ret;
3340 }
3341 
3342 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3343  * so it might not have this *mmsg-specific flag either.
3344  */
3345 #ifndef MSG_WAITFORONE
3346 #define MSG_WAITFORONE 0x10000
3347 #endif
3348 
3349 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3350                                 unsigned int vlen, unsigned int flags,
3351                                 int send)
3352 {
3353     struct target_mmsghdr *mmsgp;
3354     abi_long ret = 0;
3355     int i;
3356 
3357     if (vlen > UIO_MAXIOV) {
3358         vlen = UIO_MAXIOV;
3359     }
3360 
3361     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3362     if (!mmsgp) {
3363         return -TARGET_EFAULT;
3364     }
3365 
3366     for (i = 0; i < vlen; i++) {
3367         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3368         if (is_error(ret)) {
3369             break;
3370         }
3371         mmsgp[i].msg_len = tswap32(ret);
3372         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3373         if (flags & MSG_WAITFORONE) {
3374             flags |= MSG_DONTWAIT;
3375         }
3376     }
3377 
3378     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3379 
3380     /* Return number of datagrams sent if we sent any at all;
3381      * otherwise return the error.
3382      */
3383     if (i) {
3384         return i;
3385     }
3386     return ret;
3387 }
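
/*
 * Illustrative guest-side sketch (not from the original source) of the
 * MSG_WAITFORONE behaviour emulated above: block for the first datagram,
 * then drain whatever else is already queued without blocking.  "sockfd"
 * is assumed to be an already-bound datagram socket.
 */
#if 0
struct mmsghdr msgs[8];
struct iovec iov[8];
char bufs[8][1500];
int i, n;

for (i = 0; i < 8; i++) {
    iov[i].iov_base = bufs[i];
    iov[i].iov_len = sizeof(bufs[i]);
    msgs[i].msg_hdr = (struct msghdr){ .msg_iov = &iov[i], .msg_iovlen = 1 };
}
n = recvmmsg(sockfd, msgs, 8, MSG_WAITFORONE, NULL);
#endif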
3388 
3389 /* do_accept4() Must return target values and target errnos. */
3390 static abi_long do_accept4(int fd, abi_ulong target_addr,
3391                            abi_ulong target_addrlen_addr, int flags)
3392 {
3393     socklen_t addrlen, ret_addrlen;
3394     void *addr;
3395     abi_long ret;
3396     int host_flags;
3397 
3398     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3399 
3400     if (target_addr == 0) {
3401         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3402     }
3403 
3404     /* Linux returns EFAULT if the addrlen pointer is invalid */
3405     if (get_user_u32(addrlen, target_addrlen_addr))
3406         return -TARGET_EFAULT;
3407 
3408     if ((int)addrlen < 0) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3413         return -TARGET_EFAULT;
3414     }
3415 
3416     addr = alloca(addrlen);
3417 
3418     ret_addrlen = addrlen;
3419     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3420     if (!is_error(ret)) {
3421         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3422         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3423             ret = -TARGET_EFAULT;
3424         }
3425     }
3426     return ret;
3427 }
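
/*
 * Illustrative guest-side call (a sketch, not from the original source)
 * handled by do_accept4(); the close-on-exec and non-blocking flags are
 * translated through fcntl_flags_tbl before the host accept4() runs.
 * "listen_fd" is assumed to be a listening socket.
 */
#if 0
struct sockaddr_storage ss;
socklen_t len = sizeof(ss);
int conn = accept4(listen_fd, (struct sockaddr *)&ss, &len,
                   SOCK_CLOEXEC | SOCK_NONBLOCK);
#endif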
3428 
3429 /* do_getpeername() Must return target values and target errnos. */
3430 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3431                                abi_ulong target_addrlen_addr)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436 
3437     if (get_user_u32(addrlen, target_addrlen_addr))
3438         return -TARGET_EFAULT;
3439 
3440     if ((int)addrlen < 0) {
3441         return -TARGET_EINVAL;
3442     }
3443 
3444     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3445         return -TARGET_EFAULT;
3446     }
3447 
3448     addr = alloca(addrlen);
3449 
3450     ret_addrlen = addrlen;
3451     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3452     if (!is_error(ret)) {
3453         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3454         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3455             ret = -TARGET_EFAULT;
3456         }
3457     }
3458     return ret;
3459 }
3460 
3461 /* do_getsockname() Must return target values and target errnos. */
3462 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3463                                abi_ulong target_addrlen_addr)
3464 {
3465     socklen_t addrlen, ret_addrlen;
3466     void *addr;
3467     abi_long ret;
3468 
3469     if (get_user_u32(addrlen, target_addrlen_addr))
3470         return -TARGET_EFAULT;
3471 
3472     if ((int)addrlen < 0) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3477         return -TARGET_EFAULT;
3478     }
3479 
3480     addr = alloca(addrlen);
3481 
3482     ret_addrlen = addrlen;
3483     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3484     if (!is_error(ret)) {
3485         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3486         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3487             ret = -TARGET_EFAULT;
3488         }
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_socketpair() Must return target values and target errnos. */
3494 static abi_long do_socketpair(int domain, int type, int protocol,
3495                               abi_ulong target_tab_addr)
3496 {
3497     int tab[2];
3498     abi_long ret;
3499 
3500     target_to_host_sock_type(&type);
3501 
3502     ret = get_errno(socketpair(domain, type, protocol, tab));
3503     if (!is_error(ret)) {
3504         if (put_user_s32(tab[0], target_tab_addr)
3505             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3506             ret = -TARGET_EFAULT;
3507     }
3508     return ret;
3509 }
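
/*
 * Illustrative guest-side call (sketch, not from the original source) that
 * do_socketpair() services; the two descriptors are copied back to the
 * guest array with put_user_s32().
 */
#if 0
int sv[2];
if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
    /* sv[0] and sv[1] are now connected to each other */
}
#endif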
3510 
3511 /* do_sendto() Must return target values and target errnos. */
3512 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3513                           abi_ulong target_addr, socklen_t addrlen)
3514 {
3515     void *addr;
3516     void *host_msg;
3517     void *copy_msg = NULL;
3518     abi_long ret;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3525     if (!host_msg)
3526         return -TARGET_EFAULT;
3527     if (fd_trans_target_to_host_data(fd)) {
3528         copy_msg = host_msg;
3529         host_msg = g_malloc(len);
3530         memcpy(host_msg, copy_msg, len);
3531         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3532         if (ret < 0) {
3533             goto fail;
3534         }
3535     }
3536     if (target_addr) {
3537         addr = alloca(addrlen+1);
3538         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3539         if (ret) {
3540             goto fail;
3541         }
3542         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3543     } else {
3544         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3545     }
3546 fail:
3547     if (copy_msg) {
3548         g_free(host_msg);
3549         host_msg = copy_msg;
3550     }
3551     unlock_user(host_msg, msg, 0);
3552     return ret;
3553 }
3554 
3555 /* do_recvfrom() Must return target values and target errnos. */
3556 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3557                             abi_ulong target_addr,
3558                             abi_ulong target_addrlen)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     void *host_msg;
3563     abi_long ret;
3564 
3565     if (!msg) {
3566         host_msg = NULL;
3567     } else {
3568         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3569         if (!host_msg) {
3570             return -TARGET_EFAULT;
3571         }
3572     }
3573     if (target_addr) {
3574         if (get_user_u32(addrlen, target_addrlen)) {
3575             ret = -TARGET_EFAULT;
3576             goto fail;
3577         }
3578         if ((int)addrlen < 0) {
3579             ret = -TARGET_EINVAL;
3580             goto fail;
3581         }
3582         addr = alloca(addrlen);
3583         ret_addrlen = addrlen;
3584         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3585                                       addr, &ret_addrlen));
3586     } else {
3587         addr = NULL; /* To keep compiler quiet.  */
3588         addrlen = 0; /* To keep compiler quiet.  */
3589         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3590     }
3591     if (!is_error(ret)) {
3592         if (fd_trans_host_to_target_data(fd)) {
3593             abi_long trans;
3594             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3595             if (is_error(trans)) {
3596                 ret = trans;
3597                 goto fail;
3598             }
3599         }
3600         if (target_addr) {
3601             host_to_target_sockaddr(target_addr, addr,
3602                                     MIN(addrlen, ret_addrlen));
3603             if (put_user_u32(ret_addrlen, target_addrlen)) {
3604                 ret = -TARGET_EFAULT;
3605                 goto fail;
3606             }
3607         }
3608         unlock_user(host_msg, msg, len);
3609     } else {
3610 fail:
3611         unlock_user(host_msg, msg, 0);
3612     }
3613     return ret;
3614 }
3615 
3616 #ifdef TARGET_NR_socketcall
3617 /* do_socketcall() must return target values and target errnos. */
3618 static abi_long do_socketcall(int num, abi_ulong vptr)
3619 {
3620     static const unsigned nargs[] = { /* number of arguments per operation */
3621         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3622         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3623         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3624         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3625         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3626         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3628         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3629         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3631         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3633         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3634         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3636         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3638         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3639         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3641     };
3642     abi_long a[6]; /* max 6 args */
3643     unsigned i;
3644 
3645     /* check the range of the first argument num */
3646     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3647     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3648         return -TARGET_EINVAL;
3649     }
3650     /* ensure we have space for args */
3651     if (nargs[num] > ARRAY_SIZE(a)) {
3652         return -TARGET_EINVAL;
3653     }
3654     /* collect the arguments in a[] according to nargs[] */
3655     for (i = 0; i < nargs[num]; ++i) {
3656         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3657             return -TARGET_EFAULT;
3658         }
3659     }
3660     /* now when we have the args, invoke the appropriate underlying function */
3661     switch (num) {
3662     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3663         return do_socket(a[0], a[1], a[2]);
3664     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3665         return do_bind(a[0], a[1], a[2]);
3666     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3667         return do_connect(a[0], a[1], a[2]);
3668     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3669         return get_errno(listen(a[0], a[1]));
3670     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3671         return do_accept4(a[0], a[1], a[2], 0);
3672     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3673         return do_getsockname(a[0], a[1], a[2]);
3674     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3675         return do_getpeername(a[0], a[1], a[2]);
3676     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3677         return do_socketpair(a[0], a[1], a[2], a[3]);
3678     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3679         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3680     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3681         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3682     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3683         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3684     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3685         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3686     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3687         return get_errno(shutdown(a[0], a[1]));
3688     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3689         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3690     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3691         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3692     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3693         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3694     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3695         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3696     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3697         return do_accept4(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3699         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3700     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3701         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3702     default:
3703         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3704         return -TARGET_EINVAL;
3705     }
3706 }
3707 #endif
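
/*
 * Illustrative sketch, an assumption rather than original source: the
 * guest-side calling convention that do_socketcall() decodes.  On a 32-bit
 * x86 guest, connect() is typically issued as a single multiplexer syscall
 * whose second argument points at an array of word-sized arguments; "fd"
 * and "server_addr" are assumed to be set up by the caller.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <netinet/in.h>
#include <linux/net.h>          /* SYS_CONNECT */

int fd;                         /* assumed: an open socket */
struct sockaddr_in server_addr; /* assumed: a filled-in address */
long args[3] = { fd, (long)&server_addr, sizeof(server_addr) };
long ret = syscall(__NR_socketcall, SYS_CONNECT, args);
#endif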
3708 
3709 #define N_SHM_REGIONS	32
3710 
3711 static struct shm_region {
3712     abi_ulong start;
3713     abi_ulong size;
3714     bool in_use;
3715 } shm_regions[N_SHM_REGIONS];
3716 
3717 #ifndef TARGET_SEMID64_DS
3718 /* asm-generic version of this struct */
3719 struct target_semid64_ds
3720 {
3721   struct target_ipc_perm sem_perm;
3722   abi_ulong sem_otime;
3723 #if TARGET_ABI_BITS == 32
3724   abi_ulong __unused1;
3725 #endif
3726   abi_ulong sem_ctime;
3727 #if TARGET_ABI_BITS == 32
3728   abi_ulong __unused2;
3729 #endif
3730   abi_ulong sem_nsems;
3731   abi_ulong __unused3;
3732   abi_ulong __unused4;
3733 };
3734 #endif
3735 
3736 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3737                                                abi_ulong target_addr)
3738 {
3739     struct target_ipc_perm *target_ip;
3740     struct target_semid64_ds *target_sd;
3741 
3742     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3743         return -TARGET_EFAULT;
3744     target_ip = &(target_sd->sem_perm);
3745     host_ip->__key = tswap32(target_ip->__key);
3746     host_ip->uid = tswap32(target_ip->uid);
3747     host_ip->gid = tswap32(target_ip->gid);
3748     host_ip->cuid = tswap32(target_ip->cuid);
3749     host_ip->cgid = tswap32(target_ip->cgid);
3750 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3751     host_ip->mode = tswap32(target_ip->mode);
3752 #else
3753     host_ip->mode = tswap16(target_ip->mode);
3754 #endif
3755 #if defined(TARGET_PPC)
3756     host_ip->__seq = tswap32(target_ip->__seq);
3757 #else
3758     host_ip->__seq = tswap16(target_ip->__seq);
3759 #endif
3760     unlock_user_struct(target_sd, target_addr, 0);
3761     return 0;
3762 }
3763 
3764 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3765                                                struct ipc_perm *host_ip)
3766 {
3767     struct target_ipc_perm *target_ip;
3768     struct target_semid64_ds *target_sd;
3769 
3770     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3771         return -TARGET_EFAULT;
3772     target_ip = &(target_sd->sem_perm);
3773     target_ip->__key = tswap32(host_ip->__key);
3774     target_ip->uid = tswap32(host_ip->uid);
3775     target_ip->gid = tswap32(host_ip->gid);
3776     target_ip->cuid = tswap32(host_ip->cuid);
3777     target_ip->cgid = tswap32(host_ip->cgid);
3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3779     target_ip->mode = tswap32(host_ip->mode);
3780 #else
3781     target_ip->mode = tswap16(host_ip->mode);
3782 #endif
3783 #if defined(TARGET_PPC)
3784     target_ip->__seq = tswap32(host_ip->__seq);
3785 #else
3786     target_ip->__seq = tswap16(host_ip->__seq);
3787 #endif
3788     unlock_user_struct(target_sd, target_addr, 1);
3789     return 0;
3790 }
3791 
3792 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_semid64_ds *target_sd;
3796 
3797     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798         return -TARGET_EFAULT;
3799     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3800         return -TARGET_EFAULT;
3801     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3802     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3803     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3804     unlock_user_struct(target_sd, target_addr, 0);
3805     return 0;
3806 }
3807 
3808 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3809                                                struct semid_ds *host_sd)
3810 {
3811     struct target_semid64_ds *target_sd;
3812 
3813     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3814         return -TARGET_EFAULT;
3815     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3816         return -TARGET_EFAULT;
3817     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3818     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3819     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3820     unlock_user_struct(target_sd, target_addr, 1);
3821     return 0;
3822 }
3823 
3824 struct target_seminfo {
3825     int semmap;
3826     int semmni;
3827     int semmns;
3828     int semmnu;
3829     int semmsl;
3830     int semopm;
3831     int semume;
3832     int semusz;
3833     int semvmx;
3834     int semaem;
3835 };
3836 
3837 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3838                                               struct seminfo *host_seminfo)
3839 {
3840     struct target_seminfo *target_seminfo;
3841     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3842         return -TARGET_EFAULT;
3843     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3844     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3845     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3846     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3847     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3848     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3849     __put_user(host_seminfo->semume, &target_seminfo->semume);
3850     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3851     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3852     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3853     unlock_user_struct(target_seminfo, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 union semun {
3858     int val;
3859     struct semid_ds *buf;
3860     unsigned short *array;
3861     struct seminfo *__buf;
3862 };
3863 
3864 union target_semun {
3865     int val;
3866     abi_ulong buf;
3867     abi_ulong array;
3868     abi_ulong __buf;
3869 };
3870 
3871 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3872                                                abi_ulong target_addr)
3873 {
3874     int nsems;
3875     unsigned short *array;
3876     union semun semun;
3877     struct semid_ds semid_ds;
3878     int i, ret;
3879 
3880     semun.buf = &semid_ds;
3881 
3882     ret = semctl(semid, 0, IPC_STAT, semun);
3883     if (ret == -1)
3884         return get_errno(ret);
3885 
3886     nsems = semid_ds.sem_nsems;
3887 
3888     *host_array = g_try_new(unsigned short, nsems);
3889     if (!*host_array) {
3890         return -TARGET_ENOMEM;
3891     }
3892     array = lock_user(VERIFY_READ, target_addr,
3893                       nsems*sizeof(unsigned short), 1);
3894     if (!array) {
3895         g_free(*host_array);
3896         return -TARGET_EFAULT;
3897     }
3898 
3899     for(i=0; i<nsems; i++) {
3900         __get_user((*host_array)[i], &array[i]);
3901     }
3902     unlock_user(array, target_addr, 0);
3903 
3904     return 0;
3905 }
3906 
3907 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3908                                                unsigned short **host_array)
3909 {
3910     int nsems;
3911     unsigned short *array;
3912     union semun semun;
3913     struct semid_ds semid_ds;
3914     int i, ret;
3915 
3916     semun.buf = &semid_ds;
3917 
3918     ret = semctl(semid, 0, IPC_STAT, semun);
3919     if (ret == -1)
3920         return get_errno(ret);
3921 
3922     nsems = semid_ds.sem_nsems;
3923 
3924     array = lock_user(VERIFY_WRITE, target_addr,
3925                       nsems*sizeof(unsigned short), 0);
3926     if (!array)
3927         return -TARGET_EFAULT;
3928 
3929     for(i=0; i<nsems; i++) {
3930         __put_user((*host_array)[i], &array[i]);
3931     }
3932     g_free(*host_array);
3933     unlock_user(array, target_addr, 1);
3934 
3935     return 0;
3936 }
3937 
3938 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3939                                  abi_ulong target_arg)
3940 {
3941     union target_semun target_su = { .buf = target_arg };
3942     union semun arg;
3943     struct semid_ds dsarg;
3944     unsigned short *array = NULL;
3945     struct seminfo seminfo;
3946     abi_long ret = -TARGET_EINVAL;
3947     abi_long err;
3948     cmd &= 0xff;
3949 
3950     switch( cmd ) {
3951 	case GETVAL:
3952 	case SETVAL:
3953             /* In 64 bit cross-endian situations, we will erroneously pick up
3954              * the wrong half of the union for the "val" element.  To rectify
3955              * this, the entire 8-byte structure is byteswapped, followed by
3956              * a swap of the 4 byte val field. In other cases, the data is
3957              * already in proper host byte order. */
3958 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3959 		target_su.buf = tswapal(target_su.buf);
3960 		arg.val = tswap32(target_su.val);
3961 	    } else {
3962 		arg.val = target_su.val;
3963 	    }
3964             ret = get_errno(semctl(semid, semnum, cmd, arg));
3965             break;
3966 	case GETALL:
3967 	case SETALL:
3968             err = target_to_host_semarray(semid, &array, target_su.array);
3969             if (err)
3970                 return err;
3971             arg.array = array;
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             err = host_to_target_semarray(semid, target_su.array, &array);
3974             if (err)
3975                 return err;
3976             break;
3977 	case IPC_STAT:
3978 	case IPC_SET:
3979 	case SEM_STAT:
3980             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3981             if (err)
3982                 return err;
3983             arg.buf = &dsarg;
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3986             if (err)
3987                 return err;
3988             break;
3989 	case IPC_INFO:
3990 	case SEM_INFO:
3991             arg.__buf = &seminfo;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_RMID:
3998 	case GETPID:
3999 	case GETNCNT:
4000 	case GETZCNT:
4001             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4002             break;
4003     }
4004 
4005     return ret;
4006 }
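
/*
 * Illustrative guest-side use (a sketch, not from the original source) of
 * the SETVAL/GETVAL path above, using a semun union like the one defined
 * earlier in this file; the value travels inside the union, which is what
 * makes the 64-bit cross-endian fixup necessary.  "semid" is assumed to
 * refer to an existing semaphore set.
 */
#if 0
union semun arg;
int val;

arg.val = 1;
semctl(semid, 0, SETVAL, arg);      /* initialise semaphore 0 to 1 */
val = semctl(semid, 0, GETVAL);     /* read it back */
#endif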
4007 
4008 struct target_sembuf {
4009     unsigned short sem_num;
4010     short sem_op;
4011     short sem_flg;
4012 };
4013 
4014 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4015                                              abi_ulong target_addr,
4016                                              unsigned nsops)
4017 {
4018     struct target_sembuf *target_sembuf;
4019     int i;
4020 
4021     target_sembuf = lock_user(VERIFY_READ, target_addr,
4022                               nsops*sizeof(struct target_sembuf), 1);
4023     if (!target_sembuf)
4024         return -TARGET_EFAULT;
4025 
4026     for(i=0; i<nsops; i++) {
4027         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4028         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4029         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4030     }
4031 
4032     unlock_user(target_sembuf, target_addr, 0);
4033 
4034     return 0;
4035 }
4036 
4037 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4038     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4039 
4040 /*
4041  * This macro is required to handle the s390 variant of sys_ipc, which
4042  * passes the arguments in a different order than the default.
4043  */
4044 #ifdef __s390x__
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), (__timeout), (__sops)
4047 #else
4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4049   (__nsops), 0, (__sops), (__timeout)
4050 #endif
4051 
4052 static inline abi_long do_semtimedop(int semid,
4053                                      abi_long ptr,
4054                                      unsigned nsops,
4055                                      abi_long timeout, bool time64)
4056 {
4057     struct sembuf *sops;
4058     struct timespec ts, *pts = NULL;
4059     abi_long ret;
4060 
4061     if (timeout) {
4062         pts = &ts;
4063         if (time64) {
4064             if (target_to_host_timespec64(pts, timeout)) {
4065                 return -TARGET_EFAULT;
4066             }
4067         } else {
4068             if (target_to_host_timespec(pts, timeout)) {
4069                 return -TARGET_EFAULT;
4070             }
4071         }
4072     }
4073 
4074     if (nsops > TARGET_SEMOPM) {
4075         return -TARGET_E2BIG;
4076     }
4077 
4078     sops = g_new(struct sembuf, nsops);
4079 
4080     if (target_to_host_sembuf(sops, ptr, nsops)) {
4081         g_free(sops);
4082         return -TARGET_EFAULT;
4083     }
4084 
4085     ret = -TARGET_ENOSYS;
4086 #ifdef __NR_semtimedop
4087     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4088 #endif
4089 #ifdef __NR_ipc
4090     if (ret == -TARGET_ENOSYS) {
4091         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4092                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4093     }
4094 #endif
4095     g_free(sops);
4096     return ret;
4097 }
4098 #endif
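
/*
 * Illustrative guest-side call (sketch, not from the original source)
 * served by do_semtimedop(), which backs both semop() (NULL timeout) and
 * semtimedop(): a single "P" operation with a one second timeout.
 * "semid" is assumed to refer to an existing semaphore set.
 */
#if 0
struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
int ret = semtimedop(semid, &op, 1, &ts);
#endif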
4099 
4100 struct target_msqid_ds
4101 {
4102     struct target_ipc_perm msg_perm;
4103     abi_ulong msg_stime;
4104 #if TARGET_ABI_BITS == 32
4105     abi_ulong __unused1;
4106 #endif
4107     abi_ulong msg_rtime;
4108 #if TARGET_ABI_BITS == 32
4109     abi_ulong __unused2;
4110 #endif
4111     abi_ulong msg_ctime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused3;
4114 #endif
4115     abi_ulong __msg_cbytes;
4116     abi_ulong msg_qnum;
4117     abi_ulong msg_qbytes;
4118     abi_ulong msg_lspid;
4119     abi_ulong msg_lrpid;
4120     abi_ulong __unused4;
4121     abi_ulong __unused5;
4122 };
4123 
4124 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4125                                                abi_ulong target_addr)
4126 {
4127     struct target_msqid_ds *target_md;
4128 
4129     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4130         return -TARGET_EFAULT;
4131     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4132         return -TARGET_EFAULT;
4133     host_md->msg_stime = tswapal(target_md->msg_stime);
4134     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4135     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4136     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4137     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4138     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4139     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4140     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4141     unlock_user_struct(target_md, target_addr, 0);
4142     return 0;
4143 }
4144 
4145 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4146                                                struct msqid_ds *host_md)
4147 {
4148     struct target_msqid_ds *target_md;
4149 
4150     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4151         return -TARGET_EFAULT;
4152     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4153         return -TARGET_EFAULT;
4154     target_md->msg_stime = tswapal(host_md->msg_stime);
4155     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4156     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4157     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4158     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4159     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4160     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4161     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4162     unlock_user_struct(target_md, target_addr, 1);
4163     return 0;
4164 }
4165 
4166 struct target_msginfo {
4167     int msgpool;
4168     int msgmap;
4169     int msgmax;
4170     int msgmnb;
4171     int msgmni;
4172     int msgssz;
4173     int msgtql;
4174     unsigned short int msgseg;
4175 };
4176 
4177 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4178                                               struct msginfo *host_msginfo)
4179 {
4180     struct target_msginfo *target_msginfo;
4181     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4182         return -TARGET_EFAULT;
4183     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4184     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4185     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4186     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4187     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4188     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4189     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4190     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4191     unlock_user_struct(target_msginfo, target_addr, 1);
4192     return 0;
4193 }
4194 
4195 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4196 {
4197     struct msqid_ds dsarg;
4198     struct msginfo msginfo;
4199     abi_long ret = -TARGET_EINVAL;
4200 
4201     cmd &= 0xff;
4202 
4203     switch (cmd) {
4204     case IPC_STAT:
4205     case IPC_SET:
4206     case MSG_STAT:
4207         if (target_to_host_msqid_ds(&dsarg,ptr))
4208             return -TARGET_EFAULT;
4209         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4210         if (host_to_target_msqid_ds(ptr,&dsarg))
4211             return -TARGET_EFAULT;
4212         break;
4213     case IPC_RMID:
4214         ret = get_errno(msgctl(msgid, cmd, NULL));
4215         break;
4216     case IPC_INFO:
4217     case MSG_INFO:
4218         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4219         if (host_to_target_msginfo(ptr, &msginfo))
4220             return -TARGET_EFAULT;
4221         break;
4222     }
4223 
4224     return ret;
4225 }
4226 
4227 struct target_msgbuf {
4228     abi_long mtype;
4229     char	mtext[1];
4230 };
4231 
4232 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4233                                  ssize_t msgsz, int msgflg)
4234 {
4235     struct target_msgbuf *target_mb;
4236     struct msgbuf *host_mb;
4237     abi_long ret = 0;
4238 
4239     if (msgsz < 0) {
4240         return -TARGET_EINVAL;
4241     }
4242 
4243     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4244         return -TARGET_EFAULT;
4245     host_mb = g_try_malloc(msgsz + sizeof(long));
4246     if (!host_mb) {
4247         unlock_user_struct(target_mb, msgp, 0);
4248         return -TARGET_ENOMEM;
4249     }
4250     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4251     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4252     ret = -TARGET_ENOSYS;
4253 #ifdef __NR_msgsnd
4254     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4255 #endif
4256 #ifdef __NR_ipc
4257     if (ret == -TARGET_ENOSYS) {
4258 #ifdef __s390x__
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb));
4261 #else
4262         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4263                                  host_mb, 0));
4264 #endif
4265     }
4266 #endif
4267     g_free(host_mb);
4268     unlock_user_struct(target_mb, msgp, 0);
4269 
4270     return ret;
4271 }
4272 
4273 #ifdef __NR_ipc
4274 #if defined(__sparc__)
4275 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4277 #elif defined(__s390x__)
4278 /* The s390 sys_ipc variant has only five parameters.  */
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp})
4281 #else
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4283     ((long int[]){(long int)__msgp, __msgtyp}), 0
4284 #endif
4285 #endif
4286 
4287 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4288                                  ssize_t msgsz, abi_long msgtyp,
4289                                  int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     char *target_mtext;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
4302 
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         ret = -TARGET_ENOMEM;
4306         goto end;
4307     }
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgrcv
4310     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4315                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4316     }
4317 #endif
4318 
4319     if (ret > 0) {
4320         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4321         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4322         if (!target_mtext) {
4323             ret = -TARGET_EFAULT;
4324             goto end;
4325         }
4326         memcpy(target_mb->mtext, host_mb->mtext, ret);
4327         unlock_user(target_mtext, target_mtext_addr, ret);
4328     }
4329 
4330     target_mb->mtype = tswapal(host_mb->mtype);
4331 
4332 end:
4333     if (target_mb)
4334         unlock_user_struct(target_mb, msgp, 1);
4335     g_free(host_mb);
4336     return ret;
4337 }
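
/*
 * Illustrative guest-side round trip (sketch, not from the original source)
 * through do_msgsnd()/do_msgrcv() above: the mtype word is byte-swapped and
 * the payload copied between guest and host buffers.  "msqid" is assumed to
 * refer to an existing message queue.
 */
#if 0
struct { long mtype; char mtext[64]; } msg = { .mtype = 1 };

snprintf(msg.mtext, sizeof(msg.mtext), "hello");
msgsnd(msqid, &msg, sizeof(msg.mtext), 0);
msgrcv(msqid, &msg, sizeof(msg.mtext), 1 /* mtype */, 0);
#endif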
4338 
4339 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4340                                                abi_ulong target_addr)
4341 {
4342     struct target_shmid_ds *target_sd;
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4345         return -TARGET_EFAULT;
4346     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4347         return -TARGET_EFAULT;
4348     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4349     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4350     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4351     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4352     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4353     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4354     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4355     unlock_user_struct(target_sd, target_addr, 0);
4356     return 0;
4357 }
4358 
4359 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4360                                                struct shmid_ds *host_sd)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4365         return -TARGET_EFAULT;
4366     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4367         return -TARGET_EFAULT;
4368     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 1);
4376     return 0;
4377 }
4378 
4379 struct  target_shminfo {
4380     abi_ulong shmmax;
4381     abi_ulong shmmin;
4382     abi_ulong shmmni;
4383     abi_ulong shmseg;
4384     abi_ulong shmall;
4385 };
4386 
4387 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4388                                               struct shminfo *host_shminfo)
4389 {
4390     struct target_shminfo *target_shminfo;
4391     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4392         return -TARGET_EFAULT;
4393     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4394     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4395     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4396     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4397     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4398     unlock_user_struct(target_shminfo, target_addr, 1);
4399     return 0;
4400 }
4401 
4402 struct target_shm_info {
4403     int used_ids;
4404     abi_ulong shm_tot;
4405     abi_ulong shm_rss;
4406     abi_ulong shm_swp;
4407     abi_ulong swap_attempts;
4408     abi_ulong swap_successes;
4409 };
4410 
4411 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4412                                                struct shm_info *host_shm_info)
4413 {
4414     struct target_shm_info *target_shm_info;
4415     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4416         return -TARGET_EFAULT;
4417     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4418     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4419     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4420     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4421     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4422     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4423     unlock_user_struct(target_shm_info, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4428 {
4429     struct shmid_ds dsarg;
4430     struct shminfo shminfo;
4431     struct shm_info shm_info;
4432     abi_long ret = -TARGET_EINVAL;
4433 
4434     cmd &= 0xff;
4435 
4436     switch(cmd) {
4437     case IPC_STAT:
4438     case IPC_SET:
4439     case SHM_STAT:
4440         if (target_to_host_shmid_ds(&dsarg, buf))
4441             return -TARGET_EFAULT;
4442         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4443         if (host_to_target_shmid_ds(buf, &dsarg))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_INFO:
4447         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4448         if (host_to_target_shminfo(buf, &shminfo))
4449             return -TARGET_EFAULT;
4450         break;
4451     case SHM_INFO:
4452         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4453         if (host_to_target_shm_info(buf, &shm_info))
4454             return -TARGET_EFAULT;
4455         break;
4456     case IPC_RMID:
4457     case SHM_LOCK:
4458     case SHM_UNLOCK:
4459         ret = get_errno(shmctl(shmid, cmd, NULL));
4460         break;
4461     }
4462 
4463     return ret;
4464 }
4465 
4466 #ifndef TARGET_FORCE_SHMLBA
4467 /* For most architectures, SHMLBA is the same as the page size;
4468  * some architectures have larger values, in which case they should
4469  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4470  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4471  * and defining its own value for SHMLBA.
4472  *
4473  * The kernel also permits SHMLBA to be set by the architecture to a
4474  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4475  * this means that addresses are rounded to the large size if
4476  * SHM_RND is set but addresses not aligned to that size are not rejected
4477  * as long as they are at least page-aligned. Since the only architecture
4478  * which uses this is ia64 this code doesn't provide for that oddity.
4479  */
4480 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4481 {
4482     return TARGET_PAGE_SIZE;
4483 }
4484 #endif
4485 
4486 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4487                                  int shmid, abi_ulong shmaddr, int shmflg)
4488 {
4489     CPUState *cpu = env_cpu(cpu_env);
4490     abi_long raddr;
4491     void *host_raddr;
4492     struct shmid_ds shm_info;
4493     int i,ret;
4494     abi_ulong shmlba;
4495 
4496     /* shmat pointers are always untagged */
4497 
4498     /* find out the length of the shared memory segment */
4499     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4500     if (is_error(ret)) {
4501         /* can't get length, bail out */
4502         return ret;
4503     }
4504 
4505     shmlba = target_shmlba(cpu_env);
4506 
4507     if (shmaddr & (shmlba - 1)) {
4508         if (shmflg & SHM_RND) {
4509             shmaddr &= ~(shmlba - 1);
4510         } else {
4511             return -TARGET_EINVAL;
4512         }
4513     }
4514     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4515         return -TARGET_EINVAL;
4516     }
4517 
4518     mmap_lock();
4519 
4520     /*
4521      * We're mapping shared memory, so ensure we generate code for parallel
4522      * execution and flush old translations.  This will work up to the level
4523      * supported by the host -- anything that requires EXCP_ATOMIC will not
4524      * be atomic with respect to an external process.
4525      */
4526     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4527         cpu->tcg_cflags |= CF_PARALLEL;
4528         tb_flush(cpu);
4529     }
4530 
4531     if (shmaddr)
4532         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4533     else {
4534         abi_ulong mmap_start;
4535 
4536         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4537         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4538 
4539         if (mmap_start == -1) {
4540             errno = ENOMEM;
4541             host_raddr = (void *)-1;
4542         } else
4543             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4544                                shmflg | SHM_REMAP);
4545     }
4546 
4547     if (host_raddr == (void *)-1) {
4548         mmap_unlock();
4549         return get_errno((long)host_raddr);
4550     }
4551     raddr=h2g((unsigned long)host_raddr);
4552 
4553     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4554                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4555                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4556 
4557     for (i = 0; i < N_SHM_REGIONS; i++) {
4558         if (!shm_regions[i].in_use) {
4559             shm_regions[i].in_use = true;
4560             shm_regions[i].start = raddr;
4561             shm_regions[i].size = shm_info.shm_segsz;
4562             break;
4563         }
4564     }
4565 
4566     mmap_unlock();
4567     return raddr;
4568 
4569 }
4570 
4571 static inline abi_long do_shmdt(abi_ulong shmaddr)
4572 {
4573     int i;
4574     abi_long rv;
4575 
4576     /* shmdt pointers are always untagged */
4577 
4578     mmap_lock();
4579 
4580     for (i = 0; i < N_SHM_REGIONS; ++i) {
4581         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4582             shm_regions[i].in_use = false;
4583             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4584             break;
4585         }
4586     }
4587     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4588 
4589     mmap_unlock();
4590 
4591     return rv;
4592 }
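
/*
 * Illustrative guest-side sequence (sketch, not from the original source)
 * that exercises do_shmat()/do_shmdt() above; the attach is recorded in
 * shm_regions[] so the later detach can clear the guest page flags again.
 */
#if 0
int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
void *p = shmat(id, NULL, 0);       /* emulator chooses the guest address */
/* ... use p ... */
shmdt(p);
shmctl(id, IPC_RMID, NULL);
#endif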
4593 
4594 #ifdef TARGET_NR_ipc
4595 /* ??? This only works with linear mappings.  */
4596 /* do_ipc() must return target values and target errnos. */
4597 static abi_long do_ipc(CPUArchState *cpu_env,
4598                        unsigned int call, abi_long first,
4599                        abi_long second, abi_long third,
4600                        abi_long ptr, abi_long fifth)
4601 {
4602     int version;
4603     abi_long ret = 0;
4604 
4605     version = call >> 16;
4606     call &= 0xffff;
4607 
4608     switch (call) {
4609     case IPCOP_semop:
4610         ret = do_semtimedop(first, ptr, second, 0, false);
4611         break;
4612     case IPCOP_semtimedop:
4613     /*
4614      * The s390 sys_ipc variant has only five parameters instead of six
4615      * (as in the default variant); the only difference is the handling of
4616      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer to
4617      * a struct timespec while the generic variant uses the fifth parameter.
4618      */
4619 #if defined(TARGET_S390X)
4620         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4621 #else
4622         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4623 #endif
4624         break;
4625 
4626     case IPCOP_semget:
4627         ret = get_errno(semget(first, second, third));
4628         break;
4629 
4630     case IPCOP_semctl: {
4631         /* The semun argument to semctl is passed by value, so dereference the
4632          * ptr argument. */
4633         abi_ulong atptr;
4634         get_user_ual(atptr, ptr);
4635         ret = do_semctl(first, second, third, atptr);
4636         break;
4637     }
4638 
4639     case IPCOP_msgget:
4640         ret = get_errno(msgget(first, second));
4641         break;
4642 
4643     case IPCOP_msgsnd:
4644         ret = do_msgsnd(first, ptr, second, third);
4645         break;
4646 
4647     case IPCOP_msgctl:
4648         ret = do_msgctl(first, second, ptr);
4649         break;
4650 
4651     case IPCOP_msgrcv:
4652         switch (version) {
4653         case 0:
4654             {
4655                 struct target_ipc_kludge {
4656                     abi_long msgp;
4657                     abi_long msgtyp;
4658                 } *tmp;
4659 
4660                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4661                     ret = -TARGET_EFAULT;
4662                     break;
4663                 }
4664 
4665                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4666 
4667                 unlock_user_struct(tmp, ptr, 0);
4668                 break;
4669             }
4670         default:
4671             ret = do_msgrcv(first, ptr, second, fifth, third);
4672         }
4673         break;
4674 
4675     case IPCOP_shmat:
4676         switch (version) {
4677         default:
4678         {
4679             abi_ulong raddr;
4680             raddr = do_shmat(cpu_env, first, ptr, second);
4681             if (is_error(raddr))
4682                 return get_errno(raddr);
4683             if (put_user_ual(raddr, third))
4684                 return -TARGET_EFAULT;
4685             break;
4686         }
4687         case 1:
4688             ret = -TARGET_EINVAL;
4689             break;
4690         }
4691         break;
4692     case IPCOP_shmdt:
4693         ret = do_shmdt(ptr);
4694         break;
4695 
4696     case IPCOP_shmget:
4697         /* IPC_* flag values are the same on all Linux platforms */
4698         ret = get_errno(shmget(first, second, third));
4699         break;
4700 
4701     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4702     case IPCOP_shmctl:
4703         ret = do_shmctl(first, second, ptr);
4704         break;
4705     default:
4706         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4707                       call, version);
4708         ret = -TARGET_ENOSYS;
4709         break;
4710     }
4711     return ret;
4712 }
4713 #endif
4714 
4715 /* kernel structure types definitions */
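/*
 * syscall_types.h is included twice below: the first pass turns every
 * STRUCT()/STRUCT_SPECIAL() line into a STRUCT_<name> enum value, and the
 * second pass turns each plain STRUCT() line into a struct_<name>_def[]
 * argtype array terminated by TYPE_NULL.  For illustration, a hypothetical
 * STRUCT(foo, TYPE_INT, TYPE_SHORT) line would yield STRUCT_foo and
 * { TYPE_INT, TYPE_SHORT, TYPE_NULL }.
 */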
4716 
4717 #define STRUCT(name, ...) STRUCT_ ## name,
4718 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 enum {
4720 #include "syscall_types.h"
4721 STRUCT_MAX
4722 };
4723 #undef STRUCT
4724 #undef STRUCT_SPECIAL
4725 
4726 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4727 #define STRUCT_SPECIAL(name)
4728 #include "syscall_types.h"
4729 #undef STRUCT
4730 #undef STRUCT_SPECIAL
4731 
4732 #define MAX_STRUCT_SIZE 4096
4733 
4734 #ifdef CONFIG_FIEMAP
4735 /* So fiemap access checks don't overflow on 32-bit systems.
4736  * This is very slightly smaller than the limit imposed by
4737  * the underlying kernel.
4738  */
4739 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4740                             / sizeof(struct fiemap_extent))
4741 
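/*
 * FS_IOC_FIEMAP handler: convert the struct fiemap header to host format,
 * switch to a larger temporary buffer when the requested extent array does
 * not fit into buf_temp, issue the ioctl, and then convert the header plus
 * the fm_mapped_extents entries that the kernel filled in back to the
 * target layout.
 */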
4742 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4743                                        int fd, int cmd, abi_long arg)
4744 {
4745     /* The parameter for this ioctl is a struct fiemap followed
4746      * by an array of struct fiemap_extent whose size is set
4747      * in fiemap->fm_extent_count. The array is filled in by the
4748      * ioctl.
4749      */
4750     int target_size_in, target_size_out;
4751     struct fiemap *fm;
4752     const argtype *arg_type = ie->arg_type;
4753     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4754     void *argptr, *p;
4755     abi_long ret;
4756     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4757     uint32_t outbufsz;
4758     int free_fm = 0;
4759 
4760     assert(arg_type[0] == TYPE_PTR);
4761     assert(ie->access == IOC_RW);
4762     arg_type++;
4763     target_size_in = thunk_type_size(arg_type, 0);
4764     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4765     if (!argptr) {
4766         return -TARGET_EFAULT;
4767     }
4768     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4769     unlock_user(argptr, arg, 0);
4770     fm = (struct fiemap *)buf_temp;
4771     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4772         return -TARGET_EINVAL;
4773     }
4774 
4775     outbufsz = sizeof (*fm) +
4776         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4777 
4778     if (outbufsz > MAX_STRUCT_SIZE) {
4779         /* We can't fit all the extents into the fixed size buffer.
4780          * Allocate one that is large enough and use it instead.
4781          */
4782         fm = g_try_malloc(outbufsz);
4783         if (!fm) {
4784             return -TARGET_ENOMEM;
4785         }
4786         memcpy(fm, buf_temp, sizeof(struct fiemap));
4787         free_fm = 1;
4788     }
4789     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4790     if (!is_error(ret)) {
4791         target_size_out = target_size_in;
4792         /* An extent_count of 0 means we were only counting the extents,
4793          * so there are no structs to copy.
4794          */
4795         if (fm->fm_extent_count != 0) {
4796             target_size_out += fm->fm_mapped_extents * extent_size;
4797         }
4798         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4799         if (!argptr) {
4800             ret = -TARGET_EFAULT;
4801         } else {
4802             /* Convert the struct fiemap */
4803             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4804             if (fm->fm_extent_count != 0) {
4805                 p = argptr + target_size_in;
4806                 /* ...and then all the struct fiemap_extents */
4807                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4808                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4809                                   THUNK_TARGET);
4810                     p += extent_size;
4811                 }
4812             }
4813             unlock_user(argptr, arg, target_size_out);
4814         }
4815     }
4816     if (free_fm) {
4817         g_free(fm);
4818     }
4819     return ret;
4820 }
4821 #endif
4822 
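/*
 * struct ifconf handler (SIOCGIFCONF): the target's ifc_buf points at an
 * array of target struct ifreq, whose size can differ from the host's.
 * Recompute ifc_len for the host call (allocating a bigger buffer if
 * needed), then convert each returned ifreq back into the target buffer and
 * restore the target's ifc_buf pointer.  A zero ifc_buf is passed through
 * as a NULL host buffer.
 */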
4823 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4824                                 int fd, int cmd, abi_long arg)
4825 {
4826     const argtype *arg_type = ie->arg_type;
4827     int target_size;
4828     void *argptr;
4829     int ret;
4830     struct ifconf *host_ifconf;
4831     uint32_t outbufsz;
4832     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4833     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4834     int target_ifreq_size;
4835     int nb_ifreq;
4836     int free_buf = 0;
4837     int i;
4838     int target_ifc_len;
4839     abi_long target_ifc_buf;
4840     int host_ifc_len;
4841     char *host_ifc_buf;
4842 
4843     assert(arg_type[0] == TYPE_PTR);
4844     assert(ie->access == IOC_RW);
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848 
4849     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4850     if (!argptr)
4851         return -TARGET_EFAULT;
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854 
4855     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4856     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4857     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4858 
4859     if (target_ifc_buf != 0) {
4860         target_ifc_len = host_ifconf->ifc_len;
4861         nb_ifreq = target_ifc_len / target_ifreq_size;
4862         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4863 
4864         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4865         if (outbufsz > MAX_STRUCT_SIZE) {
4866             /*
4867              * We can't fit all the ifreq entries into the fixed-size buffer.
4868              * Allocate one that is large enough and use it instead.
4869              */
4870             host_ifconf = malloc(outbufsz);
4871             if (!host_ifconf) {
4872                 return -TARGET_ENOMEM;
4873             }
4874             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4875             free_buf = 1;
4876         }
4877         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4878 
4879         host_ifconf->ifc_len = host_ifc_len;
4880     } else {
4881         host_ifc_buf = NULL;
4882     }
4883     host_ifconf->ifc_buf = host_ifc_buf;
4884 
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4886     if (!is_error(ret)) {
4887 	/* convert host ifc_len to target ifc_len */
4888         /* convert host ifc_len to target ifc_len */
4889         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4890         target_ifc_len = nb_ifreq * target_ifreq_size;
4891         host_ifconf->ifc_len = target_ifc_len;
4892 
4893         /* restore target ifc_buf */
4894 
4895         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4896 
4897 	/* copy struct ifconf to target user */
4898         /* copy struct ifconf to target user */
4899         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4900         if (!argptr)
4901             return -TARGET_EFAULT;
4902         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4903         unlock_user(argptr, arg, target_size);
4904 
4905         if (target_ifc_buf != 0) {
4906             /* copy ifreq[] to target user */
4907             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4908             for (i = 0; i < nb_ifreq; i++) {
4909                 thunk_convert(argptr + i * target_ifreq_size,
4910                               host_ifc_buf + i * sizeof(struct ifreq),
4911                               ifreq_arg_type, THUNK_TARGET);
4912             }
4913             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4914         }
4915     }
4916 
4917     if (free_buf) {
4918         free(host_ifconf);
4919     }
4920 
4921     return ret;
4922 }
4923 
4924 #if defined(CONFIG_USBFS)
4925 #if HOST_LONG_BITS > 64
4926 #error USBDEVFS thunks do not support >64 bit hosts yet.
4927 #endif
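/*
 * Book-keeping for an in-flight USB URB: the guest URB address, the locked
 * guest data buffer, and the host copy of the URB handed to the kernel.
 * live_urb pointers are kept in a hash table hashed on target_urb_adr (the
 * first field), so lookups can pass a pointer to the bare 64-bit guest
 * address.
 */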
4928 struct live_urb {
4929     uint64_t target_urb_adr;
4930     uint64_t target_buf_adr;
4931     char *target_buf_ptr;
4932     struct usbdevfs_urb host_urb;
4933 };
4934 
4935 static GHashTable *usbdevfs_urb_hashtable(void)
4936 {
4937     static GHashTable *urb_hashtable;
4938 
4939     if (!urb_hashtable) {
4940         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4941     }
4942     return urb_hashtable;
4943 }
4944 
4945 static void urb_hashtable_insert(struct live_urb *urb)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     g_hash_table_insert(urb_hashtable, urb, urb);
4949 }
4950 
4951 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4952 {
4953     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4954     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4955 }
4956 
4957 static void urb_hashtable_remove(struct live_urb *urb)
4958 {
4959     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4960     g_hash_table_remove(urb_hashtable, urb);
4961 }
4962 
4963 static abi_long
4964 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                           int fd, int cmd, abi_long arg)
4966 {
4967     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4968     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4969     struct live_urb *lurb;
4970     void *argptr;
4971     uint64_t hurb;
4972     int target_size;
4973     uintptr_t target_urb_adr;
4974     abi_long ret;
4975 
4976     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4977 
4978     memset(buf_temp, 0, sizeof(uint64_t));
4979     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4980     if (is_error(ret)) {
4981         return ret;
4982     }
4983 
4984     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4985     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4986     if (!lurb->target_urb_adr) {
4987         return -TARGET_EFAULT;
4988     }
4989     urb_hashtable_remove(lurb);
4990     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4991         lurb->host_urb.buffer_length);
4992     lurb->target_buf_ptr = NULL;
4993 
4994     /* restore the guest buffer pointer */
4995     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4996 
4997     /* update the guest urb struct */
4998     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5004     unlock_user(argptr, lurb->target_urb_adr, target_size);
5005 
5006     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5007     /* write back the urb handle */
5008     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5009     if (!argptr) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5015     target_urb_adr = lurb->target_urb_adr;
5016     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5017     unlock_user(argptr, arg, target_size);
5018 
5019     g_free(lurb);
5020     return ret;
5021 }
5022 
5023 static abi_long
5024 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5025                              uint8_t *buf_temp __attribute__((unused)),
5026                              int fd, int cmd, abi_long arg)
5027 {
5028     struct live_urb *lurb;
5029 
5030     /* map target address back to host URB with metadata. */
5031     lurb = urb_hashtable_lookup(arg);
5032     if (!lurb) {
5033         return -TARGET_EFAULT;
5034     }
5035     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5036 }
5037 
5038 static abi_long
5039 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5040                             int fd, int cmd, abi_long arg)
5041 {
5042     const argtype *arg_type = ie->arg_type;
5043     int target_size;
5044     abi_long ret;
5045     void *argptr;
5046     int rw_dir;
5047     struct live_urb *lurb;
5048 
5049     /*
5050      * Each submitted URB needs to map to a unique ID for the
5051      * kernel, and that unique ID needs to be a pointer to
5052      * host memory.  Hence, we need to malloc for each URB.
5053      * Isochronous transfers have a variable-length struct.
5054      */
5055     arg_type++;
5056     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5057 
5058     /* construct host copy of urb and metadata */
5059     lurb = g_try_malloc0(sizeof(struct live_urb));
5060     if (!lurb) {
5061         return -TARGET_ENOMEM;
5062     }
5063 
5064     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5065     if (!argptr) {
5066         g_free(lurb);
5067         return -TARGET_EFAULT;
5068     }
5069     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5070     unlock_user(argptr, arg, 0);
5071 
5072     lurb->target_urb_adr = arg;
5073     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5074 
5075     /* buffer space used depends on endpoint type so lock the entire buffer */
5076     /* control type urbs should check the buffer contents for true direction */
5077     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5078     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5079         lurb->host_urb.buffer_length, 1);
5080     if (lurb->target_buf_ptr == NULL) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* update buffer pointer in host copy */
5086     lurb->host_urb.buffer = lurb->target_buf_ptr;
5087 
5088     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5089     if (is_error(ret)) {
5090         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5091         g_free(lurb);
5092     } else {
5093         urb_hashtable_insert(lurb);
5094     }
5095 
5096     return ret;
5097 }
5098 #endif /* CONFIG_USBFS */
5099 
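/*
 * Device-mapper ioctls: a fixed struct dm_ioctl header is followed by a
 * variable payload that starts at data_start and whose layout depends on
 * the command.  Convert the header and any input payload into a temporary
 * buffer sized from data_size, issue the ioctl, and convert the returned
 * payload (name lists, target specs, versions, deps) back to the target.
 */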
5100 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5101                             int cmd, abi_long arg)
5102 {
5103     void *argptr;
5104     struct dm_ioctl *host_dm;
5105     abi_long guest_data;
5106     uint32_t guest_data_size;
5107     int target_size;
5108     const argtype *arg_type = ie->arg_type;
5109     abi_long ret;
5110     void *big_buf = NULL;
5111     char *host_data;
5112 
5113     arg_type++;
5114     target_size = thunk_type_size(arg_type, 0);
5115     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5116     if (!argptr) {
5117         ret = -TARGET_EFAULT;
5118         goto out;
5119     }
5120     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5121     unlock_user(argptr, arg, 0);
5122 
5123     /* buf_temp is too small, so fetch things into a bigger buffer */
5124     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5125     memcpy(big_buf, buf_temp, target_size);
5126     buf_temp = big_buf;
5127     host_dm = big_buf;
5128 
5129     guest_data = arg + host_dm->data_start;
5130     if ((guest_data - arg) < 0) {
5131         ret = -TARGET_EINVAL;
5132         goto out;
5133     }
5134     guest_data_size = host_dm->data_size - host_dm->data_start;
5135     host_data = (char*)host_dm + host_dm->data_start;
5136 
5137     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142 
5143     switch (ie->host_cmd) {
5144     case DM_REMOVE_ALL:
5145     case DM_LIST_DEVICES:
5146     case DM_DEV_CREATE:
5147     case DM_DEV_REMOVE:
5148     case DM_DEV_SUSPEND:
5149     case DM_DEV_STATUS:
5150     case DM_DEV_WAIT:
5151     case DM_TABLE_STATUS:
5152     case DM_TABLE_CLEAR:
5153     case DM_TABLE_DEPS:
5154     case DM_LIST_VERSIONS:
5155         /* no input data */
5156         break;
5157     case DM_DEV_RENAME:
5158     case DM_DEV_SET_GEOMETRY:
5159         /* data contains only strings */
5160         memcpy(host_data, argptr, guest_data_size);
5161         break;
5162     case DM_TARGET_MSG:
5163         memcpy(host_data, argptr, guest_data_size);
5164         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5165         break;
5166     case DM_TABLE_LOAD:
5167     {
5168         void *gspec = argptr;
5169         void *cur_data = host_data;
5170         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5171         int spec_size = thunk_type_size(arg_type, 0);
5172         int i;
5173 
5174         for (i = 0; i < host_dm->target_count; i++) {
5175             struct dm_target_spec *spec = cur_data;
5176             uint32_t next;
5177             int slen;
5178 
5179             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5180             slen = strlen((char*)gspec + spec_size) + 1;
5181             next = spec->next;
5182             spec->next = sizeof(*spec) + slen;
5183             strcpy((char*)&spec[1], gspec + spec_size);
5184             gspec += next;
5185             cur_data += spec->next;
5186         }
5187         break;
5188     }
5189     default:
5190         ret = -TARGET_EINVAL;
5191         unlock_user(argptr, guest_data, 0);
5192         goto out;
5193     }
5194     unlock_user(argptr, guest_data, 0);
5195 
5196     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5197     if (!is_error(ret)) {
5198         guest_data = arg + host_dm->data_start;
5199         guest_data_size = host_dm->data_size - host_dm->data_start;
5200         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5201         switch (ie->host_cmd) {
5202         case DM_REMOVE_ALL:
5203         case DM_DEV_CREATE:
5204         case DM_DEV_REMOVE:
5205         case DM_DEV_RENAME:
5206         case DM_DEV_SUSPEND:
5207         case DM_DEV_STATUS:
5208         case DM_TABLE_LOAD:
5209         case DM_TABLE_CLEAR:
5210         case DM_TARGET_MSG:
5211         case DM_DEV_SET_GEOMETRY:
5212             /* no return data */
5213             break;
5214         case DM_LIST_DEVICES:
5215         {
5216             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5217             uint32_t remaining_data = guest_data_size;
5218             void *cur_data = argptr;
5219             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5220             int nl_size = 12; /* can't use thunk_size due to alignment */
5221 
5222             while (1) {
5223                 uint32_t next = nl->next;
5224                 if (next) {
5225                     nl->next = nl_size + (strlen(nl->name) + 1);
5226                 }
5227                 if (remaining_data < nl->next) {
5228                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5229                     break;
5230                 }
5231                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5232                 strcpy(cur_data + nl_size, nl->name);
5233                 cur_data += nl->next;
5234                 remaining_data -= nl->next;
5235                 if (!next) {
5236                     break;
5237                 }
5238                 nl = (void*)nl + next;
5239             }
5240             break;
5241         }
5242         case DM_DEV_WAIT:
5243         case DM_TABLE_STATUS:
5244         {
5245             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5246             void *cur_data = argptr;
5247             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5248             int spec_size = thunk_type_size(arg_type, 0);
5249             int i;
5250 
5251             for (i = 0; i < host_dm->target_count; i++) {
5252                 uint32_t next = spec->next;
5253                 int slen = strlen((char*)&spec[1]) + 1;
5254                 spec->next = (cur_data - argptr) + spec_size + slen;
5255                 if (guest_data_size < spec->next) {
5256                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257                     break;
5258                 }
5259                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5260                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5261                 cur_data = argptr + spec->next;
5262                 spec = (void*)host_dm + host_dm->data_start + next;
5263             }
5264             break;
5265         }
5266         case DM_TABLE_DEPS:
5267         {
5268             void *hdata = (void*)host_dm + host_dm->data_start;
5269             int count = *(uint32_t*)hdata;
5270             uint64_t *hdev = hdata + 8;
5271             uint64_t *gdev = argptr + 8;
5272             int i;
5273 
5274             *(uint32_t*)argptr = tswap32(count);
5275             for (i = 0; i < count; i++) {
5276                 *gdev = tswap64(*hdev);
5277                 gdev++;
5278                 hdev++;
5279             }
5280             break;
5281         }
5282         case DM_LIST_VERSIONS:
5283         {
5284             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5285             uint32_t remaining_data = guest_data_size;
5286             void *cur_data = argptr;
5287             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5288             int vers_size = thunk_type_size(arg_type, 0);
5289 
5290             while (1) {
5291                 uint32_t next = vers->next;
5292                 if (next) {
5293                     vers->next = vers_size + (strlen(vers->name) + 1);
5294                 }
5295                 if (remaining_data < vers->next) {
5296                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297                     break;
5298                 }
5299                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5300                 strcpy(cur_data + vers_size, vers->name);
5301                 cur_data += vers->next;
5302                 remaining_data -= vers->next;
5303                 if (!next) {
5304                     break;
5305                 }
5306                 vers = (void*)vers + next;
5307             }
5308             break;
5309         }
5310         default:
5311             unlock_user(argptr, guest_data, 0);
5312             ret = -TARGET_EINVAL;
5313             goto out;
5314         }
5315         unlock_user(argptr, guest_data, guest_data_size);
5316 
5317         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5318         if (!argptr) {
5319             ret = -TARGET_EFAULT;
5320             goto out;
5321         }
5322         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5323         unlock_user(argptr, arg, target_size);
5324     }
5325 out:
5326     g_free(big_buf);
5327     return ret;
5328 }
5329 
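/*
 * BLKPG: struct blkpg_ioctl_arg carries a data pointer to a
 * struct blkpg_partition.  Convert both levels to host format and point
 * the host copy at a local blkpg_partition before issuing the ioctl.
 */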
5330 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5331                                int cmd, abi_long arg)
5332 {
5333     void *argptr;
5334     int target_size;
5335     const argtype *arg_type = ie->arg_type;
5336     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5337     abi_long ret;
5338 
5339     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5340     struct blkpg_partition host_part;
5341 
5342     /* Read and convert blkpg */
5343     arg_type++;
5344     target_size = thunk_type_size(arg_type, 0);
5345     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5346     if (!argptr) {
5347         ret = -TARGET_EFAULT;
5348         goto out;
5349     }
5350     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5351     unlock_user(argptr, arg, 0);
5352 
5353     switch (host_blkpg->op) {
5354     case BLKPG_ADD_PARTITION:
5355     case BLKPG_DEL_PARTITION:
5356         /* payload is struct blkpg_partition */
5357         break;
5358     default:
5359         /* Unknown opcode */
5360         ret = -TARGET_EINVAL;
5361         goto out;
5362     }
5363 
5364     /* Read and convert blkpg->data */
5365     arg = (abi_long)(uintptr_t)host_blkpg->data;
5366     target_size = thunk_type_size(part_arg_type, 0);
5367     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5368     if (!argptr) {
5369         ret = -TARGET_EFAULT;
5370         goto out;
5371     }
5372     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5373     unlock_user(argptr, arg, 0);
5374 
5375     /* Swizzle the data pointer to our local copy and call! */
5376     host_blkpg->data = &host_part;
5377     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5378 
5379 out:
5380     return ret;
5381 }
5382 
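/*
 * Routing table ioctls take a struct rtentry whose rt_dev field points to a
 * device name string.  Convert the struct field by field so that rt_dev can
 * be replaced with a locked host copy of the guest string, and unlock that
 * string again after the ioctl.
 */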
5383 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                 int fd, int cmd, abi_long arg)
5385 {
5386     const argtype *arg_type = ie->arg_type;
5387     const StructEntry *se;
5388     const argtype *field_types;
5389     const int *dst_offsets, *src_offsets;
5390     int target_size;
5391     void *argptr;
5392     abi_ulong *target_rt_dev_ptr = NULL;
5393     unsigned long *host_rt_dev_ptr = NULL;
5394     abi_long ret;
5395     int i;
5396 
5397     assert(ie->access == IOC_W);
5398     assert(*arg_type == TYPE_PTR);
5399     arg_type++;
5400     assert(*arg_type == TYPE_STRUCT);
5401     target_size = thunk_type_size(arg_type, 0);
5402     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403     if (!argptr) {
5404         return -TARGET_EFAULT;
5405     }
5406     arg_type++;
5407     assert(*arg_type == (int)STRUCT_rtentry);
5408     se = struct_entries + *arg_type++;
5409     assert(se->convert[0] == NULL);
5410     /* convert struct here to be able to catch rt_dev string */
5411     field_types = se->field_types;
5412     dst_offsets = se->field_offsets[THUNK_HOST];
5413     src_offsets = se->field_offsets[THUNK_TARGET];
5414     for (i = 0; i < se->nb_fields; i++) {
5415         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5416             assert(*field_types == TYPE_PTRVOID);
5417             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5418             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5419             if (*target_rt_dev_ptr != 0) {
5420                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5421                                                   tswapal(*target_rt_dev_ptr));
5422                 if (!*host_rt_dev_ptr) {
5423                     unlock_user(argptr, arg, 0);
5424                     return -TARGET_EFAULT;
5425                 }
5426             } else {
5427                 *host_rt_dev_ptr = 0;
5428             }
5429             field_types++;
5430             continue;
5431         }
5432         field_types = thunk_convert(buf_temp + dst_offsets[i],
5433                                     argptr + src_offsets[i],
5434                                     field_types, THUNK_HOST);
5435     }
5436     unlock_user(argptr, arg, 0);
5437 
5438     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5439 
5440     assert(host_rt_dev_ptr != NULL);
5441     assert(target_rt_dev_ptr != NULL);
5442     if (*host_rt_dev_ptr != 0) {
5443         unlock_user((void *)*host_rt_dev_ptr,
5444                     *target_rt_dev_ptr, 0);
5445     }
5446     return ret;
5447 }
5448 
5449 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                      int fd, int cmd, abi_long arg)
5451 {
5452     int sig = target_to_host_signal(arg);
5453     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5454 }
5455 
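/*
 * SIOCGSTAMP: the _OLD command uses the target's legacy struct timeval
 * layout, the newer one the 64-bit layout, so the copy-out helper is chosen
 * from the target command number.  SIOCGSTAMPNS below is handled the same
 * way with struct timespec.
 */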
5456 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                     int fd, int cmd, abi_long arg)
5458 {
5459     struct timeval tv;
5460     abi_long ret;
5461 
5462     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5463     if (is_error(ret)) {
5464         return ret;
5465     }
5466 
5467     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5468         if (copy_to_user_timeval(arg, &tv)) {
5469             return -TARGET_EFAULT;
5470         }
5471     } else {
5472         if (copy_to_user_timeval64(arg, &tv)) {
5473             return -TARGET_EFAULT;
5474         }
5475     }
5476 
5477     return ret;
5478 }
5479 
5480 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5481                                       int fd, int cmd, abi_long arg)
5482 {
5483     struct timespec ts;
5484     abi_long ret;
5485 
5486     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5487     if (is_error(ret)) {
5488         return ret;
5489     }
5490 
5491     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5492         if (host_to_target_timespec(arg, &ts)) {
5493             return -TARGET_EFAULT;
5494         }
5495     } else {
5496         if (host_to_target_timespec64(arg, &ts)) {
5497             return -TARGET_EFAULT;
5498         }
5499     }
5500 
5501     return ret;
5502 }
5503 
5504 #ifdef TIOCGPTPEER
5505 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5510 }
5511 #endif
5512 
5513 #ifdef HAVE_DRM_H
5514 
5515 static void unlock_drm_version(struct drm_version *host_ver,
5516                                struct target_drm_version *target_ver,
5517                                bool copy)
5518 {
5519     unlock_user(host_ver->name, target_ver->name,
5520                                 copy ? host_ver->name_len : 0);
5521     unlock_user(host_ver->date, target_ver->date,
5522                                 copy ? host_ver->date_len : 0);
5523     unlock_user(host_ver->desc, target_ver->desc,
5524                                 copy ? host_ver->desc_len : 0);
5525 }
5526 
5527 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5528                                           struct target_drm_version *target_ver)
5529 {
5530     memset(host_ver, 0, sizeof(*host_ver));
5531 
5532     __get_user(host_ver->name_len, &target_ver->name_len);
5533     if (host_ver->name_len) {
5534         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5535                                    target_ver->name_len, 0);
5536         if (!host_ver->name) {
5537             return -EFAULT;
5538         }
5539     }
5540 
5541     __get_user(host_ver->date_len, &target_ver->date_len);
5542     if (host_ver->date_len) {
5543         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5544                                    target_ver->date_len, 0);
5545         if (!host_ver->date) {
5546             goto err;
5547         }
5548     }
5549 
5550     __get_user(host_ver->desc_len, &target_ver->desc_len);
5551     if (host_ver->desc_len) {
5552         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5553                                    target_ver->desc_len, 0);
5554         if (!host_ver->desc) {
5555             goto err;
5556         }
5557     }
5558 
5559     return 0;
5560 err:
5561     unlock_drm_version(host_ver, target_ver, false);
5562     return -EFAULT;
5563 }
5564 
5565 static inline void host_to_target_drmversion(
5566                                           struct target_drm_version *target_ver,
5567                                           struct drm_version *host_ver)
5568 {
5569     __put_user(host_ver->version_major, &target_ver->version_major);
5570     __put_user(host_ver->version_minor, &target_ver->version_minor);
5571     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5572     __put_user(host_ver->name_len, &target_ver->name_len);
5573     __put_user(host_ver->date_len, &target_ver->date_len);
5574     __put_user(host_ver->desc_len, &target_ver->desc_len);
5575     unlock_drm_version(host_ver, target_ver, true);
5576 }
5577 
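/*
 * DRM_IOCTL_VERSION: the caller supplies name/date/desc buffers and their
 * lengths.  Lock the guest buffers so the host ioctl can fill them in
 * directly, then copy the version numbers and string lengths back,
 * unlocking with copy-back only on success.
 */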
5578 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5579                              int fd, int cmd, abi_long arg)
5580 {
5581     struct drm_version *ver;
5582     struct target_drm_version *target_ver;
5583     abi_long ret;
5584 
5585     switch (ie->host_cmd) {
5586     case DRM_IOCTL_VERSION:
5587         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5588             return -TARGET_EFAULT;
5589         }
5590         ver = (struct drm_version *)buf_temp;
5591         ret = target_to_host_drmversion(ver, target_ver);
5592         if (!is_error(ret)) {
5593             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5594             if (is_error(ret)) {
5595                 unlock_drm_version(ver, target_ver, false);
5596             } else {
5597                 host_to_target_drmversion(target_ver, ver);
5598             }
5599         }
5600         unlock_user_struct(target_ver, arg, 0);
5601         return ret;
5602     }
5603     return -TARGET_ENOSYS;
5604 }
5605 
5606 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5607                                            struct drm_i915_getparam *gparam,
5608                                            int fd, abi_long arg)
5609 {
5610     abi_long ret;
5611     int value;
5612     struct target_drm_i915_getparam *target_gparam;
5613 
5614     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5615         return -TARGET_EFAULT;
5616     }
5617 
5618     __get_user(gparam->param, &target_gparam->param);
5619     gparam->value = &value;
5620     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5621     put_user_s32(value, target_gparam->value);
5622 
5623     unlock_user_struct(target_gparam, arg, 0);
5624     return ret;
5625 }
5626 
5627 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5628                                   int fd, int cmd, abi_long arg)
5629 {
5630     switch (ie->host_cmd) {
5631     case DRM_IOCTL_I915_GETPARAM:
5632         return do_ioctl_drm_i915_getparam(ie,
5633                                           (struct drm_i915_getparam *)buf_temp,
5634                                           fd, arg);
5635     default:
5636         return -TARGET_ENOSYS;
5637     }
5638 }
5639 
5640 #endif
5641 
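/*
 * TUNSETTXFILTER: struct tun_filter is variable-length, a two-field header
 * followed by filter->count MAC addresses of ETH_ALEN bytes each.  Byteswap
 * the header, bound the address array against buf_temp, and copy the
 * addresses through unchanged.
 */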
5642 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                                         int fd, int cmd, abi_long arg)
5644 {
5645     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5646     struct tun_filter *target_filter;
5647     char *target_addr;
5648 
5649     assert(ie->access == IOC_W);
5650 
5651     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5652     if (!target_filter) {
5653         return -TARGET_EFAULT;
5654     }
5655     filter->flags = tswap16(target_filter->flags);
5656     filter->count = tswap16(target_filter->count);
5657     unlock_user(target_filter, arg, 0);
5658 
5659     if (filter->count) {
5660         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5661             MAX_STRUCT_SIZE) {
5662             return -TARGET_EFAULT;
5663         }
5664 
5665         target_addr = lock_user(VERIFY_READ,
5666                                 arg + offsetof(struct tun_filter, addr),
5667                                 filter->count * ETH_ALEN, 1);
5668         if (!target_addr) {
5669             return -TARGET_EFAULT;
5670         }
5671         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5672         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5673     }
5674 
5675     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5676 }
5677 
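/*
 * The ioctl dispatch table is generated from ioctls.h: IOCTL() entries are
 * converted generically from their argtype description, IOCTL_SPECIAL()
 * entries name one of the do_ioctl_* helpers above, and IOCTL_IGNORE()
 * entries have no host counterpart and fail with -TARGET_ENOSYS.
 */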
5678 IOCTLEntry ioctl_entries[] = {
5679 #define IOCTL(cmd, access, ...) \
5680     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5681 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5682     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5683 #define IOCTL_IGNORE(cmd) \
5684     { TARGET_ ## cmd, 0, #cmd },
5685 #include "ioctls.h"
5686     { 0, 0, },
5687 };
5688 
5689 /* ??? Implement proper locking for ioctls.  */
5690 /* do_ioctl() must return target values and target errnos. */
5691 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5692 {
5693     const IOCTLEntry *ie;
5694     const argtype *arg_type;
5695     abi_long ret;
5696     uint8_t buf_temp[MAX_STRUCT_SIZE];
5697     int target_size;
5698     void *argptr;
5699 
5700     ie = ioctl_entries;
5701     for (;;) {
5702         if (ie->target_cmd == 0) {
5703             qemu_log_mask(
5704                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5705             return -TARGET_ENOSYS;
5706         }
5707         if (ie->target_cmd == cmd)
5708             break;
5709         ie++;
5710     }
5711     arg_type = ie->arg_type;
5712     if (ie->do_ioctl) {
5713         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714     } else if (!ie->host_cmd) {
5715         /* Some architectures define BSD ioctls in their headers
5716            that are not implemented in Linux.  */
5717         return -TARGET_ENOSYS;
5718     }
5719 
5720     switch (arg_type[0]) {
5721     case TYPE_NULL:
5722         /* no argument */
5723         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5724         break;
5725     case TYPE_PTRVOID:
5726     case TYPE_INT:
5727     case TYPE_LONG:
5728     case TYPE_ULONG:
5729         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5730         break;
5731     case TYPE_PTR:
5732         arg_type++;
5733         target_size = thunk_type_size(arg_type, 0);
5734         switch (ie->access) {
5735         case IOC_R:
5736             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5737             if (!is_error(ret)) {
5738                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5739                 if (!argptr)
5740                     return -TARGET_EFAULT;
5741                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5742                 unlock_user(argptr, arg, target_size);
5743             }
5744             break;
5745         case IOC_W:
5746             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5747             if (!argptr)
5748                 return -TARGET_EFAULT;
5749             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5750             unlock_user(argptr, arg, 0);
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             break;
5753         default:
5754         case IOC_RW:
5755             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5756             if (!argptr)
5757                 return -TARGET_EFAULT;
5758             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5759             unlock_user(argptr, arg, 0);
5760             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5761             if (!is_error(ret)) {
5762                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5763                 if (!argptr)
5764                     return -TARGET_EFAULT;
5765                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5766                 unlock_user(argptr, arg, target_size);
5767             }
5768             break;
5769         }
5770         break;
5771     default:
5772         qemu_log_mask(LOG_UNIMP,
5773                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5774                       (long)cmd, arg_type[0]);
5775         ret = -TARGET_ENOSYS;
5776         break;
5777     }
5778     return ret;
5779 }
5780 
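/*
 * Termios flag translation tables: each bitmask_transtbl entry is a
 * (target mask, target bits, host mask, host bits) tuple consumed by
 * target_to_host_bitmask() and host_to_target_bitmask() when converting
 * the termios flag fields.
 */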
5781 static const bitmask_transtbl iflag_tbl[] = {
5782         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5783         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5784         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5785         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5786         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5787         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5788         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5789         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5790         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5791         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5792         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5793         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5794         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5795         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5796         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5797         { 0, 0, 0, 0 }
5798 };
5799 
5800 static const bitmask_transtbl oflag_tbl[] = {
5801 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5802 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5803 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5804 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5805 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5806 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5807 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5808 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5809 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5810 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5811 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5812 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5813 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5814 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5815 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5816 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5817 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5818 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5819 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5820 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5821 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5822 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5823 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5824 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5825 	{ 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl cflag_tbl[] = {
5829 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5830 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5831 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5832 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5833 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5834 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5835 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5836 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5837 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5838 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5839 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5840 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5841 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5842 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5843 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5844 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5845 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5846 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5847 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5848 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5849 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5850 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5851 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5852 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5853 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5854 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5855 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5856 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5857 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5858 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5859 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5860 	{ 0, 0, 0, 0 }
5861 };
5862 
5863 static const bitmask_transtbl lflag_tbl[] = {
5864   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5865   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5866   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5867   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5868   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5869   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5870   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5871   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5872   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5873   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5874   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5875   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5876   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5877   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5878   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5879   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5880   { 0, 0, 0, 0 }
5881 };
5882 
5883 static void target_to_host_termios (void *dst, const void *src)
5884 {
5885     struct host_termios *host = dst;
5886     const struct target_termios *target = src;
5887 
5888     host->c_iflag =
5889         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5890     host->c_oflag =
5891         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5892     host->c_cflag =
5893         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5894     host->c_lflag =
5895         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5896     host->c_line = target->c_line;
5897 
5898     memset(host->c_cc, 0, sizeof(host->c_cc));
5899     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5900     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5901     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5902     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5903     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5904     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5905     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5906     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5907     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5908     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5909     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5910     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5911     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5912     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5913     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5914     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5915     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5916 }
5917 
5918 static void host_to_target_termios (void *dst, const void *src)
5919 {
5920     struct target_termios *target = dst;
5921     const struct host_termios *host = src;
5922 
5923     target->c_iflag =
5924         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5925     target->c_oflag =
5926         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5927     target->c_cflag =
5928         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5929     target->c_lflag =
5930         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5931     target->c_line = host->c_line;
5932 
5933     memset(target->c_cc, 0, sizeof(target->c_cc));
5934     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5935     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5936     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5937     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5938     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5939     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5940     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5941     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5942     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5943     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5944     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5945     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5946     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5947     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5948     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5949     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5950     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5951 }
5952 
5953 static const StructEntry struct_termios_def = {
5954     .convert = { host_to_target_termios, target_to_host_termios },
5955     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5956     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5957     .print = print_termios,
5958 };
5959 
5960 static const bitmask_transtbl mmap_flags_tbl[] = {
5961     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5962     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5963     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5964     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5965       MAP_ANONYMOUS, MAP_ANONYMOUS },
5966     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5967       MAP_GROWSDOWN, MAP_GROWSDOWN },
5968     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5969       MAP_DENYWRITE, MAP_DENYWRITE },
5970     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5971       MAP_EXECUTABLE, MAP_EXECUTABLE },
5972     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5973     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5974       MAP_NORESERVE, MAP_NORESERVE },
5975     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5976     /* MAP_STACK has been ignored by the kernel for quite some time.
5977        Recognize it for the target, but do not pass it through to
5978        the host.  */
5979     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5980     { 0, 0, 0, 0 }
5981 };
5982 
5983 /*
5984  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5985  *       TARGET_I386 is also defined when TARGET_X86_64 is defined.
5986  */
5987 #if defined(TARGET_I386)
5988 
5989 /* NOTE: there is really one LDT for all the threads */
5990 static uint8_t *ldt_table;
5991 
5992 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5993 {
5994     int size;
5995     void *p;
5996 
5997     if (!ldt_table)
5998         return 0;
5999     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6000     if (size > bytecount)
6001         size = bytecount;
6002     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6003     if (!p)
6004         return -TARGET_EFAULT;
6005     /* ??? Should this be byteswapped?  */
6006     memcpy(p, ldt_table, size);
6007     unlock_user(p, ptr, size);
6008     return size;
6009 }
6010 
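/*
 * The flags word of target_modify_ldt_ldt_s packs seg_32bit (bit 0),
 * contents (bits 1-2), read_exec_only (bit 3), limit_in_pages (bit 4),
 * seg_not_present (bit 5), useable (bit 6) and, outside TARGET_ABI32,
 * lm (bit 7).  write_ldt() reassembles these into the two 32-bit words of
 * an x86 descriptor, mirroring the kernel's own code.
 */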
6011 /* XXX: add locking support */
6012 static abi_long write_ldt(CPUX86State *env,
6013                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6014 {
6015     struct target_modify_ldt_ldt_s ldt_info;
6016     struct target_modify_ldt_ldt_s *target_ldt_info;
6017     int seg_32bit, contents, read_exec_only, limit_in_pages;
6018     int seg_not_present, useable, lm;
6019     uint32_t *lp, entry_1, entry_2;
6020 
6021     if (bytecount != sizeof(ldt_info))
6022         return -TARGET_EINVAL;
6023     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6024         return -TARGET_EFAULT;
6025     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6026     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6027     ldt_info.limit = tswap32(target_ldt_info->limit);
6028     ldt_info.flags = tswap32(target_ldt_info->flags);
6029     unlock_user_struct(target_ldt_info, ptr, 0);
6030 
6031     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6032         return -TARGET_EINVAL;
6033     seg_32bit = ldt_info.flags & 1;
6034     contents = (ldt_info.flags >> 1) & 3;
6035     read_exec_only = (ldt_info.flags >> 3) & 1;
6036     limit_in_pages = (ldt_info.flags >> 4) & 1;
6037     seg_not_present = (ldt_info.flags >> 5) & 1;
6038     useable = (ldt_info.flags >> 6) & 1;
6039 #ifdef TARGET_ABI32
6040     lm = 0;
6041 #else
6042     lm = (ldt_info.flags >> 7) & 1;
6043 #endif
6044     if (contents == 3) {
6045         if (oldmode)
6046             return -TARGET_EINVAL;
6047         if (seg_not_present == 0)
6048             return -TARGET_EINVAL;
6049     }
6050     /* allocate the LDT */
6051     if (!ldt_table) {
6052         env->ldt.base = target_mmap(0,
6053                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6054                                     PROT_READ|PROT_WRITE,
6055                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6056         if (env->ldt.base == -1)
6057             return -TARGET_ENOMEM;
6058         memset(g2h_untagged(env->ldt.base), 0,
6059                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6060         env->ldt.limit = 0xffff;
6061         ldt_table = g2h_untagged(env->ldt.base);
6062     }
6063 
6064     /* NOTE: same code as Linux kernel */
6065     /* Allow LDTs to be cleared by the user. */
6066     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6067         if (oldmode ||
6068             (contents == 0		&&
6069              read_exec_only == 1	&&
6070              seg_32bit == 0		&&
6071              limit_in_pages == 0	&&
6072              seg_not_present == 1	&&
6073              useable == 0 )) {
6074             entry_1 = 0;
6075             entry_2 = 0;
6076             goto install;
6077         }
6078     }
6079 
6080     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6081         (ldt_info.limit & 0x0ffff);
6082     entry_2 = (ldt_info.base_addr & 0xff000000) |
6083         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6084         (ldt_info.limit & 0xf0000) |
6085         ((read_exec_only ^ 1) << 9) |
6086         (contents << 10) |
6087         ((seg_not_present ^ 1) << 15) |
6088         (seg_32bit << 22) |
6089         (limit_in_pages << 23) |
6090         (lm << 21) |
6091         0x7000;
6092     if (!oldmode)
6093         entry_2 |= (useable << 20);
6094 
6095     /* Install the new entry ...  */
6096 install:
6097     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6098     lp[0] = tswap32(entry_1);
6099     lp[1] = tswap32(entry_2);
6100     return 0;
6101 }
6102 
6103 /* specific and weird i386 syscalls */
6104 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6105                               unsigned long bytecount)
6106 {
6107     abi_long ret;
6108 
6109     switch (func) {
6110     case 0:
6111         ret = read_ldt(ptr, bytecount);
6112         break;
6113     case 1:
6114         ret = write_ldt(env, ptr, bytecount, 1);
6115         break;
6116     case 0x11:
6117         ret = write_ldt(env, ptr, bytecount, 0);
6118         break;
6119     default:
6120         ret = -TARGET_ENOSYS;
6121         break;
6122     }
6123     return ret;
6124 }
6125 
6126 #if defined(TARGET_ABI32)
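/*
 * set_thread_area: an entry_number of -1 asks us to pick a free TLS slot in
 * the GDT (TARGET_GDT_ENTRY_TLS_MIN..MAX) and to report the chosen index
 * back to the guest before installing the descriptor.
 */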
6127 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6128 {
6129     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6130     struct target_modify_ldt_ldt_s ldt_info;
6131     struct target_modify_ldt_ldt_s *target_ldt_info;
6132     int seg_32bit, contents, read_exec_only, limit_in_pages;
6133     int seg_not_present, useable, lm;
6134     uint32_t *lp, entry_1, entry_2;
6135     int i;
6136 
6137     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6138     if (!target_ldt_info)
6139         return -TARGET_EFAULT;
6140     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6141     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6142     ldt_info.limit = tswap32(target_ldt_info->limit);
6143     ldt_info.flags = tswap32(target_ldt_info->flags);
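    /*
     * An entry_number of -1 asks us to pick a free TLS slot in the GDT and
     * report the chosen index back to the guest, as set_thread_area(2) does.
     */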
6144     if (ldt_info.entry_number == -1) {
6145         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6146             if (gdt_table[i] == 0) {
6147                 ldt_info.entry_number = i;
6148                 target_ldt_info->entry_number = tswap32(i);
6149                 break;
6150             }
6151         }
6152     }
6153     unlock_user_struct(target_ldt_info, ptr, 1);
6154 
6155     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6156         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6157         return -TARGET_EINVAL;
6158     seg_32bit = ldt_info.flags & 1;
6159     contents = (ldt_info.flags >> 1) & 3;
6160     read_exec_only = (ldt_info.flags >> 3) & 1;
6161     limit_in_pages = (ldt_info.flags >> 4) & 1;
6162     seg_not_present = (ldt_info.flags >> 5) & 1;
6163     useable = (ldt_info.flags >> 6) & 1;
6164 #ifdef TARGET_ABI32
6165     lm = 0;
6166 #else
6167     lm = (ldt_info.flags >> 7) & 1;
6168 #endif
6169 
6170     if (contents == 3) {
6171         if (seg_not_present == 0)
6172             return -TARGET_EINVAL;
6173     }
6174 
6175     /* NOTE: same code as Linux kernel */
6176     /* Allow LDTs to be cleared by the user. */
6177     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6178         if ((contents == 0             &&
6179              read_exec_only == 1       &&
6180              seg_32bit == 0            &&
6181              limit_in_pages == 0       &&
6182              seg_not_present == 1      &&
6183              useable == 0 )) {
6184             entry_1 = 0;
6185             entry_2 = 0;
6186             goto install;
6187         }
6188     }
6189 
6190     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6191         (ldt_info.limit & 0x0ffff);
6192     entry_2 = (ldt_info.base_addr & 0xff000000) |
6193         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6194         (ldt_info.limit & 0xf0000) |
6195         ((read_exec_only ^ 1) << 9) |
6196         (contents << 10) |
6197         ((seg_not_present ^ 1) << 15) |
6198         (seg_32bit << 22) |
6199         (limit_in_pages << 23) |
6200         (useable << 20) |
6201         (lm << 21) |
6202         0x7000;
6203 
6204     /* Install the new entry ...  */
6205 install:
6206     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6207     lp[0] = tswap32(entry_1);
6208     lp[1] = tswap32(entry_2);
6209     return 0;
6210 }
6211 
6212 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6213 {
6214     struct target_modify_ldt_ldt_s *target_ldt_info;
6215     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6216     uint32_t base_addr, limit, flags;
6217     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6218     int seg_not_present, useable, lm;
6219     uint32_t *lp, entry_1, entry_2;
6220 
6221     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6222     if (!target_ldt_info)
6223         return -TARGET_EFAULT;
6224     idx = tswap32(target_ldt_info->entry_number);
6225     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6226         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6227         unlock_user_struct(target_ldt_info, ptr, 1);
6228         return -TARGET_EINVAL;
6229     }
6230     lp = (uint32_t *)(gdt_table + idx);
6231     entry_1 = tswap32(lp[0]);
6232     entry_2 = tswap32(lp[1]);
6233 
6234     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6235     contents = (entry_2 >> 10) & 3;
6236     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6237     seg_32bit = (entry_2 >> 22) & 1;
6238     limit_in_pages = (entry_2 >> 23) & 1;
6239     useable = (entry_2 >> 20) & 1;
6240 #ifdef TARGET_ABI32
6241     lm = 0;
6242 #else
6243     lm = (entry_2 >> 21) & 1;
6244 #endif
6245     flags = (seg_32bit << 0) | (contents << 1) |
6246         (read_exec_only << 3) | (limit_in_pages << 4) |
6247         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6248     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6249     base_addr = (entry_1 >> 16) |
6250         (entry_2 & 0xff000000) |
6251         ((entry_2 & 0xff) << 16);
6252     target_ldt_info->base_addr = tswapal(base_addr);
6253     target_ldt_info->limit = tswap32(limit);
6254     target_ldt_info->flags = tswap32(flags);
6255     unlock_user_struct(target_ldt_info, ptr, 1);
6256     return 0;
6257 }
6258 
6259 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6260 {
6261     return -TARGET_ENOSYS;
6262 }
6263 #else
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     abi_long ret = 0;
6267     abi_ulong val;
6268     int idx;
6269 
6270     switch(code) {
6271     case TARGET_ARCH_SET_GS:
6272     case TARGET_ARCH_SET_FS:
6273         if (code == TARGET_ARCH_SET_GS)
6274             idx = R_GS;
6275         else
6276             idx = R_FS;
6277         cpu_x86_load_seg(env, idx, 0);
6278         env->segs[idx].base = addr;
6279         break;
6280     case TARGET_ARCH_GET_GS:
6281     case TARGET_ARCH_GET_FS:
6282         if (code == TARGET_ARCH_GET_GS)
6283             idx = R_GS;
6284         else
6285             idx = R_FS;
6286         val = env->segs[idx].base;
6287         if (put_user(val, addr, abi_ulong))
6288             ret = -TARGET_EFAULT;
6289         break;
6290     default:
6291         ret = -TARGET_EINVAL;
6292         break;
6293     }
6294     return ret;
6295 }
6296 #endif /* defined(TARGET_ABI32) */
6297 #endif /* defined(TARGET_I386) */
6298 
6299 /*
6300  * These constants are generic.  Supply any that are missing from the host.
6301  */
6302 #ifndef PR_SET_NAME
6303 # define PR_SET_NAME    15
6304 # define PR_GET_NAME    16
6305 #endif
6306 #ifndef PR_SET_FP_MODE
6307 # define PR_SET_FP_MODE 45
6308 # define PR_GET_FP_MODE 46
6309 # define PR_FP_MODE_FR   (1 << 0)
6310 # define PR_FP_MODE_FRE  (1 << 1)
6311 #endif
6312 #ifndef PR_SVE_SET_VL
6313 # define PR_SVE_SET_VL  50
6314 # define PR_SVE_GET_VL  51
6315 # define PR_SVE_VL_LEN_MASK  0xffff
6316 # define PR_SVE_VL_INHERIT   (1 << 17)
6317 #endif
6318 #ifndef PR_PAC_RESET_KEYS
6319 # define PR_PAC_RESET_KEYS  54
6320 # define PR_PAC_APIAKEY   (1 << 0)
6321 # define PR_PAC_APIBKEY   (1 << 1)
6322 # define PR_PAC_APDAKEY   (1 << 2)
6323 # define PR_PAC_APDBKEY   (1 << 3)
6324 # define PR_PAC_APGAKEY   (1 << 4)
6325 #endif
6326 #ifndef PR_SET_TAGGED_ADDR_CTRL
6327 # define PR_SET_TAGGED_ADDR_CTRL 55
6328 # define PR_GET_TAGGED_ADDR_CTRL 56
6329 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6330 #endif
6331 #ifndef PR_MTE_TCF_SHIFT
6332 # define PR_MTE_TCF_SHIFT       1
6333 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6337 # define PR_MTE_TAG_SHIFT       3
6338 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6339 #endif
6340 #ifndef PR_SET_IO_FLUSHER
6341 # define PR_SET_IO_FLUSHER 57
6342 # define PR_GET_IO_FLUSHER 58
6343 #endif
6344 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6345 # define PR_SET_SYSCALL_USER_DISPATCH 59
6346 #endif
6347 
6348 #include "target_prctl.h"
6349 
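/*
 * target_prctl.h supplies per-target do_prctl_* handlers; any handler it
 * does not define falls back to the EINVAL stubs below.
 */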
6350 static abi_long do_prctl_inval0(CPUArchState *env)
6351 {
6352     return -TARGET_EINVAL;
6353 }
6354 
6355 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6356 {
6357     return -TARGET_EINVAL;
6358 }
6359 
6360 #ifndef do_prctl_get_fp_mode
6361 #define do_prctl_get_fp_mode do_prctl_inval0
6362 #endif
6363 #ifndef do_prctl_set_fp_mode
6364 #define do_prctl_set_fp_mode do_prctl_inval1
6365 #endif
6366 #ifndef do_prctl_get_vl
6367 #define do_prctl_get_vl do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_set_vl
6370 #define do_prctl_set_vl do_prctl_inval1
6371 #endif
6372 #ifndef do_prctl_reset_keys
6373 #define do_prctl_reset_keys do_prctl_inval1
6374 #endif
6375 #ifndef do_prctl_set_tagged_addr_ctrl
6376 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_get_tagged_addr_ctrl
6379 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6380 #endif
6381 
6382 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6383                          abi_long arg3, abi_long arg4, abi_long arg5)
6384 {
6385     abi_long ret;
6386 
6387     switch (option) {
6388     case PR_GET_PDEATHSIG:
6389         {
6390             int deathsig;
6391             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6392                                   arg3, arg4, arg5));
6393             if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
6394                 return -TARGET_EFAULT;
6395             }
6396             return ret;
6397         }
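    /*
     * PR_GET_NAME and PR_SET_NAME use a fixed 16-byte buffer
     * (TASK_COMM_LEN, including the trailing NUL), hence the constant 16.
     */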
6398     case PR_GET_NAME:
6399         {
6400             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6401             if (!name) {
6402                 return -TARGET_EFAULT;
6403             }
6404             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6405                                   arg3, arg4, arg5));
6406             unlock_user(name, arg2, 16);
6407             return ret;
6408         }
6409     case PR_SET_NAME:
6410         {
6411             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6412             if (!name) {
6413                 return -TARGET_EFAULT;
6414             }
6415             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6416                                   arg3, arg4, arg5));
6417             unlock_user(name, arg2, 0);
6418             return ret;
6419         }
6420     case PR_GET_FP_MODE:
6421         return do_prctl_get_fp_mode(env);
6422     case PR_SET_FP_MODE:
6423         return do_prctl_set_fp_mode(env, arg2);
6424     case PR_SVE_GET_VL:
6425         return do_prctl_get_vl(env);
6426     case PR_SVE_SET_VL:
6427         return do_prctl_set_vl(env, arg2);
6428     case PR_PAC_RESET_KEYS:
6429         if (arg3 || arg4 || arg5) {
6430             return -TARGET_EINVAL;
6431         }
6432         return do_prctl_reset_keys(env, arg2);
6433     case PR_SET_TAGGED_ADDR_CTRL:
6434         if (arg3 || arg4 || arg5) {
6435             return -TARGET_EINVAL;
6436         }
6437         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6438     case PR_GET_TAGGED_ADDR_CTRL:
6439         if (arg2 || arg3 || arg4 || arg5) {
6440             return -TARGET_EINVAL;
6441         }
6442         return do_prctl_get_tagged_addr_ctrl(env);
6443 
6444     case PR_GET_DUMPABLE:
6445     case PR_SET_DUMPABLE:
6446     case PR_GET_KEEPCAPS:
6447     case PR_SET_KEEPCAPS:
6448     case PR_GET_TIMING:
6449     case PR_SET_TIMING:
6450     case PR_GET_TIMERSLACK:
6451     case PR_SET_TIMERSLACK:
6452     case PR_MCE_KILL:
6453     case PR_MCE_KILL_GET:
6454     case PR_GET_NO_NEW_PRIVS:
6455     case PR_SET_NO_NEW_PRIVS:
6456     case PR_GET_IO_FLUSHER:
6457     case PR_SET_IO_FLUSHER:
6458         /* These prctl options take no pointer arguments; pass them on as-is. */
6459         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6460 
6461     case PR_GET_CHILD_SUBREAPER:
6462     case PR_SET_CHILD_SUBREAPER:
6463     case PR_GET_SPECULATION_CTRL:
6464     case PR_SET_SPECULATION_CTRL:
6465     case PR_GET_TID_ADDRESS:
6466         /* TODO */
6467         return -TARGET_EINVAL;
6468 
6469     case PR_GET_FPEXC:
6470     case PR_SET_FPEXC:
6471         /* Was used for SPE on PowerPC. */
6472         return -TARGET_EINVAL;
6473 
6474     case PR_GET_ENDIAN:
6475     case PR_SET_ENDIAN:
6476     case PR_GET_FPEMU:
6477     case PR_SET_FPEMU:
6478     case PR_SET_MM:
6479     case PR_GET_SECCOMP:
6480     case PR_SET_SECCOMP:
6481     case PR_SET_SYSCALL_USER_DISPATCH:
6482     case PR_GET_THP_DISABLE:
6483     case PR_SET_THP_DISABLE:
6484     case PR_GET_TSC:
6485     case PR_SET_TSC:
6486     case PR_GET_UNALIGN:
6487     case PR_SET_UNALIGN:
6488         /* Refuse these so the guest cannot disable features QEMU relies on. */
6489         return -TARGET_EINVAL;
6490 
6491     default:
6492         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6493                       option);
6494         return -TARGET_EINVAL;
6495     }
6496 }
6497 
6498 #define NEW_STACK_SIZE 0x40000
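/* Host stack size (256 KiB) for each thread created for guest clone(CLONE_VM). */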
6499 
6500 
6501 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6502 typedef struct {
6503     CPUArchState *env;
6504     pthread_mutex_t mutex;
6505     pthread_cond_t cond;
6506     pthread_t thread;
6507     uint32_t tid;
6508     abi_ulong child_tidptr;
6509     abi_ulong parent_tidptr;
6510     sigset_t sigmask;
6511 } new_thread_info;
6512 
6513 static void *clone_func(void *arg)
6514 {
6515     new_thread_info *info = arg;
6516     CPUArchState *env;
6517     CPUState *cpu;
6518     TaskState *ts;
6519 
6520     rcu_register_thread();
6521     tcg_register_thread();
6522     env = info->env;
6523     cpu = env_cpu(env);
6524     thread_cpu = cpu;
6525     ts = (TaskState *)cpu->opaque;
6526     info->tid = sys_gettid();
6527     task_settid(ts);
6528     if (info->child_tidptr)
6529         put_user_u32(info->tid, info->child_tidptr);
6530     if (info->parent_tidptr)
6531         put_user_u32(info->tid, info->parent_tidptr);
6532     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6533     /* Enable signals.  */
6534     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6535     /* Signal to the parent that we're ready.  */
6536     pthread_mutex_lock(&info->mutex);
6537     pthread_cond_broadcast(&info->cond);
6538     pthread_mutex_unlock(&info->mutex);
6539     /* Wait until the parent has finished initializing the tls state.  */
6540     pthread_mutex_lock(&clone_lock);
6541     pthread_mutex_unlock(&clone_lock);
6542     cpu_loop(env);
6543     /* never exits */
6544     return NULL;
6545 }
6546 
6547 /* do_fork() must return host values and target errnos (unlike most
6548    do_*() functions). */
6549 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6550                    abi_ulong parent_tidptr, target_ulong newtls,
6551                    abi_ulong child_tidptr)
6552 {
6553     CPUState *cpu = env_cpu(env);
6554     int ret;
6555     TaskState *ts;
6556     CPUState *new_cpu;
6557     CPUArchState *new_env;
6558     sigset_t sigmask;
6559 
6560     flags &= ~CLONE_IGNORED_FLAGS;
6561 
6562     /* Emulate vfork() with fork() */
6563     if (flags & CLONE_VFORK)
6564         flags &= ~(CLONE_VFORK | CLONE_VM);
6565 
6566     if (flags & CLONE_VM) {
6567         TaskState *parent_ts = (TaskState *)cpu->opaque;
6568         new_thread_info info;
6569         pthread_attr_t attr;
6570 
6571         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6572             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6573             return -TARGET_EINVAL;
6574         }
6575 
6576         ts = g_new0(TaskState, 1);
6577         init_task_state(ts);
6578 
6579         /* Grab a mutex so that thread setup appears atomic.  */
6580         pthread_mutex_lock(&clone_lock);
6581 
6582         /*
6583          * If this is our first additional thread, we need to ensure we
6584          * generate code for parallel execution and flush old translations.
6585          * Do this now so that the copy gets CF_PARALLEL too.
6586          */
6587         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6588             cpu->tcg_cflags |= CF_PARALLEL;
6589             tb_flush(cpu);
6590         }
6591 
6592         /* we create a new CPU instance. */
6593         new_env = cpu_copy(env);
6594         /* Init regs that differ from the parent.  */
6595         cpu_clone_regs_child(new_env, newsp, flags);
6596         cpu_clone_regs_parent(env, flags);
6597         new_cpu = env_cpu(new_env);
6598         new_cpu->opaque = ts;
6599         ts->bprm = parent_ts->bprm;
6600         ts->info = parent_ts->info;
6601         ts->signal_mask = parent_ts->signal_mask;
6602 
6603         if (flags & CLONE_CHILD_CLEARTID) {
6604             ts->child_tidptr = child_tidptr;
6605         }
6606 
6607         if (flags & CLONE_SETTLS) {
6608             cpu_set_tls (new_env, newtls);
6609         }
6610 
6611         memset(&info, 0, sizeof(info));
6612         pthread_mutex_init(&info.mutex, NULL);
6613         pthread_mutex_lock(&info.mutex);
6614         pthread_cond_init(&info.cond, NULL);
6615         info.env = new_env;
6616         if (flags & CLONE_CHILD_SETTID) {
6617             info.child_tidptr = child_tidptr;
6618         }
6619         if (flags & CLONE_PARENT_SETTID) {
6620             info.parent_tidptr = parent_tidptr;
6621         }
6622 
6623         ret = pthread_attr_init(&attr);
6624         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6625         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6626         /* It is not safe to deliver signals until the child has finished
6627            initializing, so temporarily block all signals.  */
6628         sigfillset(&sigmask);
6629         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6630         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6631 
6632         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6633         /* TODO: Free new CPU state if thread creation failed.  */
6634 
6635         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6636         pthread_attr_destroy(&attr);
6637         if (ret == 0) {
6638             /* Wait for the child to initialize.  */
6639             pthread_cond_wait(&info.cond, &info.mutex);
6640             ret = info.tid;
6641         } else {
6642             ret = -1;
6643         }
6644         pthread_mutex_unlock(&info.mutex);
6645         pthread_cond_destroy(&info.cond);
6646         pthread_mutex_destroy(&info.mutex);
6647         pthread_mutex_unlock(&clone_lock);
6648     } else {
6649         /* without CLONE_VM, we treat this as a plain fork */
6650         if (flags & CLONE_INVALID_FORK_FLAGS) {
6651             return -TARGET_EINVAL;
6652         }
6653 
6654         /* We can't support custom termination signals */
6655         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6656             return -TARGET_EINVAL;
6657         }
6658 
6659         if (block_signals()) {
6660             return -QEMU_ERESTARTSYS;
6661         }
6662 
6663         fork_start();
6664         ret = fork();
6665         if (ret == 0) {
6666             /* Child Process.  */
6667             cpu_clone_regs_child(env, newsp, flags);
6668             fork_end(1);
6669             /* There is a race condition here.  The parent process could
6670                theoretically read the TID in the child process before the child
6671                tid is set.  This would require using either ptrace
6672                (not implemented) or having *_tidptr point at a shared memory
6673                mapping.  We can't repeat the spinlock hack used above because
6674                the child process gets its own copy of the lock.  */
6675             if (flags & CLONE_CHILD_SETTID)
6676                 put_user_u32(sys_gettid(), child_tidptr);
6677             if (flags & CLONE_PARENT_SETTID)
6678                 put_user_u32(sys_gettid(), parent_tidptr);
6679             ts = (TaskState *)cpu->opaque;
6680             if (flags & CLONE_SETTLS)
6681                 cpu_set_tls (env, newtls);
6682             if (flags & CLONE_CHILD_CLEARTID)
6683                 ts->child_tidptr = child_tidptr;
6684         } else {
6685             cpu_clone_regs_parent(env, flags);
6686             fork_end(0);
6687         }
6688     }
6689     return ret;
6690 }
6691 
6692 /* Warning: does not handle Linux-specific flags... */
6693 static int target_to_host_fcntl_cmd(int cmd)
6694 {
6695     int ret;
6696 
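    /*
     * Record-locking commands map to the host's 64-bit flock variants,
     * because do_fcntl() below always works with struct flock64 internally.
     */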
6697     switch(cmd) {
6698     case TARGET_F_DUPFD:
6699     case TARGET_F_GETFD:
6700     case TARGET_F_SETFD:
6701     case TARGET_F_GETFL:
6702     case TARGET_F_SETFL:
6703     case TARGET_F_OFD_GETLK:
6704     case TARGET_F_OFD_SETLK:
6705     case TARGET_F_OFD_SETLKW:
6706         ret = cmd;
6707         break;
6708     case TARGET_F_GETLK:
6709         ret = F_GETLK64;
6710         break;
6711     case TARGET_F_SETLK:
6712         ret = F_SETLK64;
6713         break;
6714     case TARGET_F_SETLKW:
6715         ret = F_SETLKW64;
6716         break;
6717     case TARGET_F_GETOWN:
6718         ret = F_GETOWN;
6719         break;
6720     case TARGET_F_SETOWN:
6721         ret = F_SETOWN;
6722         break;
6723     case TARGET_F_GETSIG:
6724         ret = F_GETSIG;
6725         break;
6726     case TARGET_F_SETSIG:
6727         ret = F_SETSIG;
6728         break;
6729 #if TARGET_ABI_BITS == 32
6730     case TARGET_F_GETLK64:
6731         ret = F_GETLK64;
6732         break;
6733     case TARGET_F_SETLK64:
6734         ret = F_SETLK64;
6735         break;
6736     case TARGET_F_SETLKW64:
6737         ret = F_SETLKW64;
6738         break;
6739 #endif
6740     case TARGET_F_SETLEASE:
6741         ret = F_SETLEASE;
6742         break;
6743     case TARGET_F_GETLEASE:
6744         ret = F_GETLEASE;
6745         break;
6746 #ifdef F_DUPFD_CLOEXEC
6747     case TARGET_F_DUPFD_CLOEXEC:
6748         ret = F_DUPFD_CLOEXEC;
6749         break;
6750 #endif
6751     case TARGET_F_NOTIFY:
6752         ret = F_NOTIFY;
6753         break;
6754 #ifdef F_GETOWN_EX
6755     case TARGET_F_GETOWN_EX:
6756         ret = F_GETOWN_EX;
6757         break;
6758 #endif
6759 #ifdef F_SETOWN_EX
6760     case TARGET_F_SETOWN_EX:
6761         ret = F_SETOWN_EX;
6762         break;
6763 #endif
6764 #ifdef F_SETPIPE_SZ
6765     case TARGET_F_SETPIPE_SZ:
6766         ret = F_SETPIPE_SZ;
6767         break;
6768     case TARGET_F_GETPIPE_SZ:
6769         ret = F_GETPIPE_SZ;
6770         break;
6771 #endif
6772 #ifdef F_ADD_SEALS
6773     case TARGET_F_ADD_SEALS:
6774         ret = F_ADD_SEALS;
6775         break;
6776     case TARGET_F_GET_SEALS:
6777         ret = F_GET_SEALS;
6778         break;
6779 #endif
6780     default:
6781         ret = -TARGET_EINVAL;
6782         break;
6783     }
6784 
6785 #if defined(__powerpc64__)
6786     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6787      * 14, which the kernel does not support. The glibc fcntl wrapper adjusts
6788      * them to 5, 6 and 7 before making the syscall(). Since we make the
6789      * syscall directly, adjust to what the kernel supports.
6790      */
6791     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6792         ret -= F_GETLK64 - 5;
6793     }
6794 #endif
6795 
6796     return ret;
6797 }
6798 
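/*
 * X-macro table of flock lock types; each user defines TRANSTBL_CONVERT to
 * expand the entries into the case labels it needs.
 */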
6799 #define FLOCK_TRANSTBL \
6800     switch (type) { \
6801     TRANSTBL_CONVERT(F_RDLCK); \
6802     TRANSTBL_CONVERT(F_WRLCK); \
6803     TRANSTBL_CONVERT(F_UNLCK); \
6804     }
6805 
6806 static int target_to_host_flock(int type)
6807 {
6808 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6809     FLOCK_TRANSTBL
6810 #undef  TRANSTBL_CONVERT
6811     return -TARGET_EINVAL;
6812 }
6813 
6814 static int host_to_target_flock(int type)
6815 {
6816 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6817     FLOCK_TRANSTBL
6818 #undef  TRANSTBL_CONVERT
6819     /* if we don't know how to convert the value coming
6820     /* if we don't know how to convert the value coming
6821      * from the host, copy it to the target field as-is
6822     return type;
6823 }
6824 
6825 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6826                                             abi_ulong target_flock_addr)
6827 {
6828     struct target_flock *target_fl;
6829     int l_type;
6830 
6831     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6832         return -TARGET_EFAULT;
6833     }
6834 
6835     __get_user(l_type, &target_fl->l_type);
6836     l_type = target_to_host_flock(l_type);
6837     if (l_type < 0) {
6838         return l_type;
6839     }
6840     fl->l_type = l_type;
6841     __get_user(fl->l_whence, &target_fl->l_whence);
6842     __get_user(fl->l_start, &target_fl->l_start);
6843     __get_user(fl->l_len, &target_fl->l_len);
6844     __get_user(fl->l_pid, &target_fl->l_pid);
6845     unlock_user_struct(target_fl, target_flock_addr, 0);
6846     return 0;
6847 }
6848 
6849 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6850                                           const struct flock64 *fl)
6851 {
6852     struct target_flock *target_fl;
6853     short l_type;
6854 
6855     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6856         return -TARGET_EFAULT;
6857     }
6858 
6859     l_type = host_to_target_flock(fl->l_type);
6860     __put_user(l_type, &target_fl->l_type);
6861     __put_user(fl->l_whence, &target_fl->l_whence);
6862     __put_user(fl->l_start, &target_fl->l_start);
6863     __put_user(fl->l_len, &target_fl->l_len);
6864     __put_user(fl->l_pid, &target_fl->l_pid);
6865     unlock_user_struct(target_fl, target_flock_addr, 1);
6866     return 0;
6867 }
6868 
6869 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6870 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6871 
6872 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6873 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6874                                                    abi_ulong target_flock_addr)
6875 {
6876     struct target_oabi_flock64 *target_fl;
6877     int l_type;
6878 
6879     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6880         return -TARGET_EFAULT;
6881     }
6882 
6883     __get_user(l_type, &target_fl->l_type);
6884     l_type = target_to_host_flock(l_type);
6885     if (l_type < 0) {
6886         return l_type;
6887     }
6888     fl->l_type = l_type;
6889     __get_user(fl->l_whence, &target_fl->l_whence);
6890     __get_user(fl->l_start, &target_fl->l_start);
6891     __get_user(fl->l_len, &target_fl->l_len);
6892     __get_user(fl->l_pid, &target_fl->l_pid);
6893     unlock_user_struct(target_fl, target_flock_addr, 0);
6894     return 0;
6895 }
6896 
6897 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6898                                                  const struct flock64 *fl)
6899 {
6900     struct target_oabi_flock64 *target_fl;
6901     short l_type;
6902 
6903     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6904         return -TARGET_EFAULT;
6905     }
6906 
6907     l_type = host_to_target_flock(fl->l_type);
6908     __put_user(l_type, &target_fl->l_type);
6909     __put_user(fl->l_whence, &target_fl->l_whence);
6910     __put_user(fl->l_start, &target_fl->l_start);
6911     __put_user(fl->l_len, &target_fl->l_len);
6912     __put_user(fl->l_pid, &target_fl->l_pid);
6913     unlock_user_struct(target_fl, target_flock_addr, 1);
6914     return 0;
6915 }
6916 #endif
6917 
6918 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6919                                               abi_ulong target_flock_addr)
6920 {
6921     struct target_flock64 *target_fl;
6922     int l_type;
6923 
6924     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6925         return -TARGET_EFAULT;
6926     }
6927 
6928     __get_user(l_type, &target_fl->l_type);
6929     l_type = target_to_host_flock(l_type);
6930     if (l_type < 0) {
6931         return l_type;
6932     }
6933     fl->l_type = l_type;
6934     __get_user(fl->l_whence, &target_fl->l_whence);
6935     __get_user(fl->l_start, &target_fl->l_start);
6936     __get_user(fl->l_len, &target_fl->l_len);
6937     __get_user(fl->l_pid, &target_fl->l_pid);
6938     unlock_user_struct(target_fl, target_flock_addr, 0);
6939     return 0;
6940 }
6941 
6942 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6943                                             const struct flock64 *fl)
6944 {
6945     struct target_flock64 *target_fl;
6946     short l_type;
6947 
6948     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6949         return -TARGET_EFAULT;
6950     }
6951 
6952     l_type = host_to_target_flock(fl->l_type);
6953     __put_user(l_type, &target_fl->l_type);
6954     __put_user(fl->l_whence, &target_fl->l_whence);
6955     __put_user(fl->l_start, &target_fl->l_start);
6956     __put_user(fl->l_len, &target_fl->l_len);
6957     __put_user(fl->l_pid, &target_fl->l_pid);
6958     unlock_user_struct(target_fl, target_flock_addr, 1);
6959     return 0;
6960 }
6961 
6962 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6963 {
6964     struct flock64 fl64;
6965 #ifdef F_GETOWN_EX
6966     struct f_owner_ex fox;
6967     struct target_f_owner_ex *target_fox;
6968 #endif
6969     abi_long ret;
6970     int host_cmd = target_to_host_fcntl_cmd(cmd);
6971 
6972     if (host_cmd == -TARGET_EINVAL)
6973         return host_cmd;
6974 
6975     switch(cmd) {
6976     case TARGET_F_GETLK:
6977         ret = copy_from_user_flock(&fl64, arg);
6978         if (ret) {
6979             return ret;
6980         }
6981         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6982         if (ret == 0) {
6983             ret = copy_to_user_flock(arg, &fl64);
6984         }
6985         break;
6986 
6987     case TARGET_F_SETLK:
6988     case TARGET_F_SETLKW:
6989         ret = copy_from_user_flock(&fl64, arg);
6990         if (ret) {
6991             return ret;
6992         }
6993         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6994         break;
6995 
6996     case TARGET_F_GETLK64:
6997     case TARGET_F_OFD_GETLK:
6998         ret = copy_from_user_flock64(&fl64, arg);
6999         if (ret) {
7000             return ret;
7001         }
7002         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7003         if (ret == 0) {
7004             ret = copy_to_user_flock64(arg, &fl64);
7005         }
7006         break;
7007     case TARGET_F_SETLK64:
7008     case TARGET_F_SETLKW64:
7009     case TARGET_F_OFD_SETLK:
7010     case TARGET_F_OFD_SETLKW:
7011         ret = copy_from_user_flock64(&fl64, arg);
7012         if (ret) {
7013             return ret;
7014         }
7015         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7016         break;
7017 
7018     case TARGET_F_GETFL:
7019         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7020         if (ret >= 0) {
7021             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7022         }
7023         break;
7024 
7025     case TARGET_F_SETFL:
7026         ret = get_errno(safe_fcntl(fd, host_cmd,
7027                                    target_to_host_bitmask(arg,
7028                                                           fcntl_flags_tbl)));
7029         break;
7030 
7031 #ifdef F_GETOWN_EX
7032     case TARGET_F_GETOWN_EX:
7033         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7034         if (ret >= 0) {
7035             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7036                 return -TARGET_EFAULT;
7037             target_fox->type = tswap32(fox.type);
7038             target_fox->pid = tswap32(fox.pid);
7039             unlock_user_struct(target_fox, arg, 1);
7040         }
7041         break;
7042 #endif
7043 
7044 #ifdef F_SETOWN_EX
7045     case TARGET_F_SETOWN_EX:
7046         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7047             return -TARGET_EFAULT;
7048         fox.type = tswap32(target_fox->type);
7049         fox.pid = tswap32(target_fox->pid);
7050         unlock_user_struct(target_fox, arg, 0);
7051         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7052         break;
7053 #endif
7054 
7055     case TARGET_F_SETSIG:
7056         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7057         break;
7058 
7059     case TARGET_F_GETSIG:
7060         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7061         break;
7062 
7063     case TARGET_F_SETOWN:
7064     case TARGET_F_GETOWN:
7065     case TARGET_F_SETLEASE:
7066     case TARGET_F_GETLEASE:
7067     case TARGET_F_SETPIPE_SZ:
7068     case TARGET_F_GETPIPE_SZ:
7069     case TARGET_F_ADD_SEALS:
7070     case TARGET_F_GET_SEALS:
7071         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7072         break;
7073 
7074     default:
7075         ret = get_errno(safe_fcntl(fd, cmd, arg));
7076         break;
7077     }
7078     return ret;
7079 }
7080 
7081 #ifdef USE_UID16
7082 
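/*
 * Squeeze 32-bit IDs into the 16-bit ABI; values that do not fit become
 * 65534, matching the kernel's default overflowuid/overflowgid.
 */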
7083 static inline int high2lowuid(int uid)
7084 {
7085     if (uid > 65535)
7086         return 65534;
7087     else
7088         return uid;
7089 }
7090 
7091 static inline int high2lowgid(int gid)
7092 {
7093     if (gid > 65535)
7094         return 65534;
7095     else
7096         return gid;
7097 }
7098 
7099 static inline int low2highuid(int uid)
7100 {
7101     if ((int16_t)uid == -1)
7102         return -1;
7103     else
7104         return uid;
7105 }
7106 
7107 static inline int low2highgid(int gid)
7108 {
7109     if ((int16_t)gid == -1)
7110         return -1;
7111     else
7112         return gid;
7113 }
7114 static inline int tswapid(int id)
7115 {
7116     return tswap16(id);
7117 }
7118 
7119 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7120 
7121 #else /* !USE_UID16 */
7122 static inline int high2lowuid(int uid)
7123 {
7124     return uid;
7125 }
7126 static inline int high2lowgid(int gid)
7127 {
7128     return gid;
7129 }
7130 static inline int low2highuid(int uid)
7131 {
7132     return uid;
7133 }
7134 static inline int low2highgid(int gid)
7135 {
7136     return gid;
7137 }
7138 static inline int tswapid(int id)
7139 {
7140     return tswap32(id);
7141 }
7142 
7143 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7144 
7145 #endif /* USE_UID16 */
7146 
7147 /* We must do direct syscalls for setting UID/GID, because we want to
7148  * implement the Linux system call semantics of "change only for this thread",
7149  * not the libc/POSIX semantics of "change for all threads in process".
7150  * (See http://ewontfix.com/17/ for more details.)
7151  * We use the 32-bit version of the syscalls if present; if it is not
7152  * then either the host architecture supports 32-bit UIDs natively with
7153  * the standard syscall, or the 16-bit UID is the best we can do.
7154  */
7155 #ifdef __NR_setuid32
7156 #define __NR_sys_setuid __NR_setuid32
7157 #else
7158 #define __NR_sys_setuid __NR_setuid
7159 #endif
7160 #ifdef __NR_setgid32
7161 #define __NR_sys_setgid __NR_setgid32
7162 #else
7163 #define __NR_sys_setgid __NR_setgid
7164 #endif
7165 #ifdef __NR_setresuid32
7166 #define __NR_sys_setresuid __NR_setresuid32
7167 #else
7168 #define __NR_sys_setresuid __NR_setresuid
7169 #endif
7170 #ifdef __NR_setresgid32
7171 #define __NR_sys_setresgid __NR_setresgid32
7172 #else
7173 #define __NR_sys_setresgid __NR_setresgid
7174 #endif
7175 
7176 _syscall1(int, sys_setuid, uid_t, uid)
7177 _syscall1(int, sys_setgid, gid_t, gid)
7178 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7179 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7180 
7181 void syscall_init(void)
7182 {
7183     IOCTLEntry *ie;
7184     const argtype *arg_type;
7185     int size;
7186 
7187     thunk_init(STRUCT_MAX);
7188 
7189 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7190 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7191 #include "syscall_types.h"
7192 #undef STRUCT
7193 #undef STRUCT_SPECIAL
7194 
7195     /* Patch the ioctl size if necessary.  We rely on the fact that
7196        no ioctl has all bits set to '1' in its size field. */
7197     ie = ioctl_entries;
7198     while (ie->target_cmd != 0) {
7199         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7200             TARGET_IOC_SIZEMASK) {
7201             arg_type = ie->arg_type;
7202             if (arg_type[0] != TYPE_PTR) {
7203                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7204                         ie->target_cmd);
7205                 exit(1);
7206             }
7207             arg_type++;
7208             size = thunk_type_size(arg_type, 0);
7209             ie->target_cmd = (ie->target_cmd &
7210                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7211                 (size << TARGET_IOC_SIZESHIFT);
7212         }
7213 
7214         /* automatic consistency check if same arch */
7215 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7216     (defined(__x86_64__) && defined(TARGET_X86_64))
7217         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7218             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7219                     ie->name, ie->target_cmd, ie->host_cmd);
7220         }
7221 #endif
7222         ie++;
7223     }
7224 }
7225 
7226 #ifdef TARGET_NR_truncate64
7227 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7228                                          abi_long arg2,
7229                                          abi_long arg3,
7230                                          abi_long arg4)
7231 {
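    /*
     * Some 32-bit ABIs pass 64-bit syscall arguments in aligned register
     * pairs, inserting a padding slot; in that case the offset halves
     * arrive in arg3/arg4 rather than arg2/arg3.
     */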
7232     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7233         arg2 = arg3;
7234         arg3 = arg4;
7235     }
7236     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7237 }
7238 #endif
7239 
7240 #ifdef TARGET_NR_ftruncate64
7241 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7242                                           abi_long arg2,
7243                                           abi_long arg3,
7244                                           abi_long arg4)
7245 {
7246     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7247         arg2 = arg3;
7248         arg3 = arg4;
7249     }
7250     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7251 }
7252 #endif
7253 
7254 #if defined(TARGET_NR_timer_settime) || \
7255     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7256 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7257                                                  abi_ulong target_addr)
7258 {
7259     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7260                                 offsetof(struct target_itimerspec,
7261                                          it_interval)) ||
7262         target_to_host_timespec(&host_its->it_value, target_addr +
7263                                 offsetof(struct target_itimerspec,
7264                                          it_value))) {
7265         return -TARGET_EFAULT;
7266     }
7267 
7268     return 0;
7269 }
7270 #endif
7271 
7272 #if defined(TARGET_NR_timer_settime64) || \
7273     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7274 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7275                                                    abi_ulong target_addr)
7276 {
7277     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7278                                   offsetof(struct target__kernel_itimerspec,
7279                                            it_interval)) ||
7280         target_to_host_timespec64(&host_its->it_value, target_addr +
7281                                   offsetof(struct target__kernel_itimerspec,
7282                                            it_value))) {
7283         return -TARGET_EFAULT;
7284     }
7285 
7286     return 0;
7287 }
7288 #endif
7289 
7290 #if ((defined(TARGET_NR_timerfd_gettime) || \
7291       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7292       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7293 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7294                                                  struct itimerspec *host_its)
7295 {
7296     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7297                                                        it_interval),
7298                                 &host_its->it_interval) ||
7299         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7300                                                        it_value),
7301                                 &host_its->it_value)) {
7302         return -TARGET_EFAULT;
7303     }
7304     return 0;
7305 }
7306 #endif
7307 
7308 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7309       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7310       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7311 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7312                                                    struct itimerspec *host_its)
7313 {
7314     if (host_to_target_timespec64(target_addr +
7315                                   offsetof(struct target__kernel_itimerspec,
7316                                            it_interval),
7317                                   &host_its->it_interval) ||
7318         host_to_target_timespec64(target_addr +
7319                                   offsetof(struct target__kernel_itimerspec,
7320                                            it_value),
7321                                   &host_its->it_value)) {
7322         return -TARGET_EFAULT;
7323     }
7324     return 0;
7325 }
7326 #endif
7327 
7328 #if defined(TARGET_NR_adjtimex) || \
7329     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7330 static inline abi_long target_to_host_timex(struct timex *host_tx,
7331                                             abi_long target_addr)
7332 {
7333     struct target_timex *target_tx;
7334 
7335     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7336         return -TARGET_EFAULT;
7337     }
7338 
7339     __get_user(host_tx->modes, &target_tx->modes);
7340     __get_user(host_tx->offset, &target_tx->offset);
7341     __get_user(host_tx->freq, &target_tx->freq);
7342     __get_user(host_tx->maxerror, &target_tx->maxerror);
7343     __get_user(host_tx->esterror, &target_tx->esterror);
7344     __get_user(host_tx->status, &target_tx->status);
7345     __get_user(host_tx->constant, &target_tx->constant);
7346     __get_user(host_tx->precision, &target_tx->precision);
7347     __get_user(host_tx->tolerance, &target_tx->tolerance);
7348     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7349     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7350     __get_user(host_tx->tick, &target_tx->tick);
7351     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7352     __get_user(host_tx->jitter, &target_tx->jitter);
7353     __get_user(host_tx->shift, &target_tx->shift);
7354     __get_user(host_tx->stabil, &target_tx->stabil);
7355     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7356     __get_user(host_tx->calcnt, &target_tx->calcnt);
7357     __get_user(host_tx->errcnt, &target_tx->errcnt);
7358     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7359     __get_user(host_tx->tai, &target_tx->tai);
7360 
7361     unlock_user_struct(target_tx, target_addr, 0);
7362     return 0;
7363 }
7364 
7365 static inline abi_long host_to_target_timex(abi_long target_addr,
7366                                             struct timex *host_tx)
7367 {
7368     struct target_timex *target_tx;
7369 
7370     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7371         return -TARGET_EFAULT;
7372     }
7373 
7374     __put_user(host_tx->modes, &target_tx->modes);
7375     __put_user(host_tx->offset, &target_tx->offset);
7376     __put_user(host_tx->freq, &target_tx->freq);
7377     __put_user(host_tx->maxerror, &target_tx->maxerror);
7378     __put_user(host_tx->esterror, &target_tx->esterror);
7379     __put_user(host_tx->status, &target_tx->status);
7380     __put_user(host_tx->constant, &target_tx->constant);
7381     __put_user(host_tx->precision, &target_tx->precision);
7382     __put_user(host_tx->tolerance, &target_tx->tolerance);
7383     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7384     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7385     __put_user(host_tx->tick, &target_tx->tick);
7386     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7387     __put_user(host_tx->jitter, &target_tx->jitter);
7388     __put_user(host_tx->shift, &target_tx->shift);
7389     __put_user(host_tx->stabil, &target_tx->stabil);
7390     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7391     __put_user(host_tx->calcnt, &target_tx->calcnt);
7392     __put_user(host_tx->errcnt, &target_tx->errcnt);
7393     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7394     __put_user(host_tx->tai, &target_tx->tai);
7395 
7396     unlock_user_struct(target_tx, target_addr, 1);
7397     return 0;
7398 }
7399 #endif
7400 
7401 
7402 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7403 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7404                                               abi_long target_addr)
7405 {
7406     struct target__kernel_timex *target_tx;
7407 
7408     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7409                                  offsetof(struct target__kernel_timex,
7410                                           time))) {
7411         return -TARGET_EFAULT;
7412     }
7413 
7414     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7415         return -TARGET_EFAULT;
7416     }
7417 
7418     __get_user(host_tx->modes, &target_tx->modes);
7419     __get_user(host_tx->offset, &target_tx->offset);
7420     __get_user(host_tx->freq, &target_tx->freq);
7421     __get_user(host_tx->maxerror, &target_tx->maxerror);
7422     __get_user(host_tx->esterror, &target_tx->esterror);
7423     __get_user(host_tx->status, &target_tx->status);
7424     __get_user(host_tx->constant, &target_tx->constant);
7425     __get_user(host_tx->precision, &target_tx->precision);
7426     __get_user(host_tx->tolerance, &target_tx->tolerance);
7427     __get_user(host_tx->tick, &target_tx->tick);
7428     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7429     __get_user(host_tx->jitter, &target_tx->jitter);
7430     __get_user(host_tx->shift, &target_tx->shift);
7431     __get_user(host_tx->stabil, &target_tx->stabil);
7432     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7433     __get_user(host_tx->calcnt, &target_tx->calcnt);
7434     __get_user(host_tx->errcnt, &target_tx->errcnt);
7435     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7436     __get_user(host_tx->tai, &target_tx->tai);
7437 
7438     unlock_user_struct(target_tx, target_addr, 0);
7439     return 0;
7440 }
7441 
7442 static inline abi_long host_to_target_timex64(abi_long target_addr,
7443                                               struct timex *host_tx)
7444 {
7445     struct target__kernel_timex *target_tx;
7446 
7447     if (copy_to_user_timeval64(target_addr +
7448                                offsetof(struct target__kernel_timex, time),
7449                                &host_tx->time)) {
7450         return -TARGET_EFAULT;
7451     }
7452 
7453     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7454         return -TARGET_EFAULT;
7455     }
7456 
7457     __put_user(host_tx->modes, &target_tx->modes);
7458     __put_user(host_tx->offset, &target_tx->offset);
7459     __put_user(host_tx->freq, &target_tx->freq);
7460     __put_user(host_tx->maxerror, &target_tx->maxerror);
7461     __put_user(host_tx->esterror, &target_tx->esterror);
7462     __put_user(host_tx->status, &target_tx->status);
7463     __put_user(host_tx->constant, &target_tx->constant);
7464     __put_user(host_tx->precision, &target_tx->precision);
7465     __put_user(host_tx->tolerance, &target_tx->tolerance);
7466     __put_user(host_tx->tick, &target_tx->tick);
7467     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7468     __put_user(host_tx->jitter, &target_tx->jitter);
7469     __put_user(host_tx->shift, &target_tx->shift);
7470     __put_user(host_tx->stabil, &target_tx->stabil);
7471     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7472     __put_user(host_tx->calcnt, &target_tx->calcnt);
7473     __put_user(host_tx->errcnt, &target_tx->errcnt);
7474     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7475     __put_user(host_tx->tai, &target_tx->tai);
7476 
7477     unlock_user_struct(target_tx, target_addr, 1);
7478     return 0;
7479 }
7480 #endif
7481 
7482 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7483 #define sigev_notify_thread_id _sigev_un._tid
7484 #endif
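/*
 * Older libc headers do not expose sigev_notify_thread_id by name; the
 * define above falls back to the union member glibc uses internally.
 */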
7485 
7486 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7487                                                abi_ulong target_addr)
7488 {
7489     struct target_sigevent *target_sevp;
7490 
7491     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7492         return -TARGET_EFAULT;
7493     }
7494 
7495     /* This union is awkward on 64 bit systems because it has a 32 bit
7496      * integer and a pointer in it; we follow the conversion approach
7497      * used for handling sigval types in signal.c so the guest should get
7498      * the correct value back even if we did a 64 bit byteswap and it's
7499      * using the 32 bit integer.
7500      */
7501     host_sevp->sigev_value.sival_ptr =
7502         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7503     host_sevp->sigev_signo =
7504         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7505     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7506     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7507 
7508     unlock_user_struct(target_sevp, target_addr, 1);
7509     return 0;
7510 }
7511 
7512 #if defined(TARGET_NR_mlockall)
7513 static inline int target_to_host_mlockall_arg(int arg)
7514 {
7515     int result = 0;
7516 
7517     if (arg & TARGET_MCL_CURRENT) {
7518         result |= MCL_CURRENT;
7519     }
7520     if (arg & TARGET_MCL_FUTURE) {
7521         result |= MCL_FUTURE;
7522     }
7523 #ifdef MCL_ONFAULT
7524     if (arg & TARGET_MCL_ONFAULT) {
7525         result |= MCL_ONFAULT;
7526     }
7527 #endif
7528 
7529     return result;
7530 }
7531 #endif
7532 
7533 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7534      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7535      defined(TARGET_NR_newfstatat))
7536 static inline abi_long host_to_target_stat64(void *cpu_env,
7537                                              abi_ulong target_addr,
7538                                              struct stat *host_st)
7539 {
7540 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7541     if (((CPUARMState *)cpu_env)->eabi) {
7542         struct target_eabi_stat64 *target_st;
7543 
7544         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7545             return -TARGET_EFAULT;
7546         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7547         __put_user(host_st->st_dev, &target_st->st_dev);
7548         __put_user(host_st->st_ino, &target_st->st_ino);
7549 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7550         __put_user(host_st->st_ino, &target_st->__st_ino);
7551 #endif
7552         __put_user(host_st->st_mode, &target_st->st_mode);
7553         __put_user(host_st->st_nlink, &target_st->st_nlink);
7554         __put_user(host_st->st_uid, &target_st->st_uid);
7555         __put_user(host_st->st_gid, &target_st->st_gid);
7556         __put_user(host_st->st_rdev, &target_st->st_rdev);
7557         __put_user(host_st->st_size, &target_st->st_size);
7558         __put_user(host_st->st_blksize, &target_st->st_blksize);
7559         __put_user(host_st->st_blocks, &target_st->st_blocks);
7560         __put_user(host_st->st_atime, &target_st->target_st_atime);
7561         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7562         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7563 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7564         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7565         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7566         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7567 #endif
7568         unlock_user_struct(target_st, target_addr, 1);
7569     } else
7570 #endif
7571     {
7572 #if defined(TARGET_HAS_STRUCT_STAT64)
7573         struct target_stat64 *target_st;
7574 #else
7575         struct target_stat *target_st;
7576 #endif
7577 
7578         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7579             return -TARGET_EFAULT;
7580         memset(target_st, 0, sizeof(*target_st));
7581         __put_user(host_st->st_dev, &target_st->st_dev);
7582         __put_user(host_st->st_ino, &target_st->st_ino);
7583 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7584         __put_user(host_st->st_ino, &target_st->__st_ino);
7585 #endif
7586         __put_user(host_st->st_mode, &target_st->st_mode);
7587         __put_user(host_st->st_nlink, &target_st->st_nlink);
7588         __put_user(host_st->st_uid, &target_st->st_uid);
7589         __put_user(host_st->st_gid, &target_st->st_gid);
7590         __put_user(host_st->st_rdev, &target_st->st_rdev);
7591         /* XXX: better use of kernel struct */
7592         __put_user(host_st->st_size, &target_st->st_size);
7593         __put_user(host_st->st_blksize, &target_st->st_blksize);
7594         __put_user(host_st->st_blocks, &target_st->st_blocks);
7595         __put_user(host_st->st_atime, &target_st->target_st_atime);
7596         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7597         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7598 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7599         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7600         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7601         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7602 #endif
7603         unlock_user_struct(target_st, target_addr, 1);
7604     }
7605 
7606     return 0;
7607 }
7608 #endif
7609 
7610 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7611 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7612                                             abi_ulong target_addr)
7613 {
7614     struct target_statx *target_stx;
7615 
7616     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7617         return -TARGET_EFAULT;
7618     }
7619     memset(target_stx, 0, sizeof(*target_stx));
7620 
7621     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7622     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7623     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7624     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7625     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7626     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7627     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7628     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7629     __put_user(host_stx->stx_size, &target_stx->stx_size);
7630     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7631     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7632     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7633     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7634     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7635     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7636     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7637     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7638     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7639     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7640     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7641     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7642     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7643     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7644 
7645     unlock_user_struct(target_stx, target_addr, 1);
7646 
7647     return 0;
7648 }
7649 #endif
7650 
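/*
 * Issue a raw futex syscall on the host, picking whichever variant the
 * host provides: a 64-bit host always uses __NR_futex, while a 32-bit
 * host prefers __NR_futex_time64 when its struct timespec carries a
 * 64-bit tv_sec and otherwise falls back to the legacy __NR_futex.
 */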
7651 static int do_sys_futex(int *uaddr, int op, int val,
7652                          const struct timespec *timeout, int *uaddr2,
7653                          int val3)
7654 {
7655 #if HOST_LONG_BITS == 64
7656 #if defined(__NR_futex)
7657     /* a 64-bit host always has a 64-bit time_t and no _time64 variant */
7658     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7659 
7660 #endif
7661 #else /* HOST_LONG_BITS == 64 */
7662 #if defined(__NR_futex_time64)
7663     if (sizeof(timeout->tv_sec) == 8) {
7664         /* 64-bit time_t syscall on a 32-bit host */
7665         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7666     }
7667 #endif
7668 #if defined(__NR_futex)
7669     /* legacy 32-bit time_t syscall on a 32-bit host */
7670     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7671 #endif
7672 #endif /* HOST_LONG_BITS == 64 */
7673     g_assert_not_reached();
7674 }
7675 
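/*
 * Same host-syscall selection as do_sys_futex(), but routed through the
 * safe_futex()/safe_futex_time64() wrappers so that signals are handled
 * correctly across the blocking call, with the result converted to a
 * -TARGET_* errno via get_errno().
 */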
7676 static int do_safe_futex(int *uaddr, int op, int val,
7677                          const struct timespec *timeout, int *uaddr2,
7678                          int val3)
7679 {
7680 #if HOST_LONG_BITS == 64
7681 #if defined(__NR_futex)
7682     /* a 64-bit host always has a 64-bit time_t and no _time64 variant */
7683     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7684 #endif
7685 #else /* HOST_LONG_BITS == 64 */
7686 #if defined(__NR_futex_time64)
7687     if (sizeof(timeout->tv_sec) == 8) {
7688         /* 64-bit time_t syscall on a 32-bit host */
7689         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7690                                            val3));
7691     }
7692 #endif
7693 #if defined(__NR_futex)
7694         /* legacy 32-bit time_t syscall on a 32-bit host */
7695     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7696 #endif
7697 #endif /* HOST_LONG_BITS == 64 */
7698     return -TARGET_ENOSYS;
7699 }
7700 
7701 /* ??? Using host futex calls even when target atomic operations
7702    are not really atomic probably breaks things.  However, implementing
7703    futexes locally would make futexes shared between multiple processes
7704    tricky.  Then again, such futexes are probably useless anyway, because
7705    guest atomic operations won't work either.  */
7706 #if defined(TARGET_NR_futex)
7707 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7708                     target_ulong timeout, target_ulong uaddr2, int val3)
7709 {
7710     struct timespec ts, *pts;
7711     int base_op;
7712 
7713     /* ??? We assume FUTEX_* constants are the same on both host
7714        and target.  */
7715 #ifdef FUTEX_CMD_MASK
7716     base_op = op & FUTEX_CMD_MASK;
7717 #else
7718     base_op = op;
7719 #endif
7720     switch (base_op) {
7721     case FUTEX_WAIT:
7722     case FUTEX_WAIT_BITSET:
7723         if (timeout) {
7724             pts = &ts;
7725             target_to_host_timespec(pts, timeout);
7726         } else {
7727             pts = NULL;
7728         }
7729         return do_safe_futex(g2h(cpu, uaddr),
7730                              op, tswap32(val), pts, NULL, val3);
7731     case FUTEX_WAKE:
7732         return do_safe_futex(g2h(cpu, uaddr),
7733                              op, val, NULL, NULL, 0);
7734     case FUTEX_FD:
7735         return do_safe_futex(g2h(cpu, uaddr),
7736                              op, val, NULL, NULL, 0);
7737     case FUTEX_REQUEUE:
7738     case FUTEX_CMP_REQUEUE:
7739     case FUTEX_WAKE_OP:
7740         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7741            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7742            But the prototype takes a `struct timespec *'; insert casts
7743            to satisfy the compiler.  We do not need to tswap TIMEOUT
7744            since it's not compared to guest memory.  */
7745         pts = (struct timespec *)(uintptr_t) timeout;
7746         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7747                              (base_op == FUTEX_CMP_REQUEUE
7748                               ? tswap32(val3) : val3));
7749     default:
7750         return -TARGET_ENOSYS;
7751     }
7752 }
7753 #endif
7754 
7755 #if defined(TARGET_NR_futex_time64)
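/*
 * As do_futex(), but the guest timeout is a 64-bit struct timespec and is
 * converted with target_to_host_timespec64().
 */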
7756 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7757                            int val, target_ulong timeout,
7758                            target_ulong uaddr2, int val3)
7759 {
7760     struct timespec ts, *pts;
7761     int base_op;
7762 
7763     /* ??? We assume FUTEX_* constants are the same on both host
7764        and target.  */
7765 #ifdef FUTEX_CMD_MASK
7766     base_op = op & FUTEX_CMD_MASK;
7767 #else
7768     base_op = op;
7769 #endif
7770     switch (base_op) {
7771     case FUTEX_WAIT:
7772     case FUTEX_WAIT_BITSET:
7773         if (timeout) {
7774             pts = &ts;
7775             if (target_to_host_timespec64(pts, timeout)) {
7776                 return -TARGET_EFAULT;
7777             }
7778         } else {
7779             pts = NULL;
7780         }
7781         return do_safe_futex(g2h(cpu, uaddr), op,
7782                              tswap32(val), pts, NULL, val3);
7783     case FUTEX_WAKE:
7784         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7785     case FUTEX_FD:
7786         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7787     case FUTEX_REQUEUE:
7788     case FUTEX_CMP_REQUEUE:
7789     case FUTEX_WAKE_OP:
7790         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7791            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7792            But the prototype takes a `struct timespec *'; insert casts
7793            to satisfy the compiler.  We do not need to tswap TIMEOUT
7794            since it's not compared to guest memory.  */
7795         pts = (struct timespec *)(uintptr_t) timeout;
7796         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7797                              (base_op == FUTEX_CMP_REQUEUE
7798                               ? tswap32(val3) : val3));
7799     default:
7800         return -TARGET_ENOSYS;
7801     }
7802 }
7803 #endif
7804 
7805 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
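/*
 * Emulate name_to_handle_at(): read handle_bytes from the guest
 * struct file_handle, let the host fill a scratch handle of that size,
 * then copy the opaque handle back to guest memory with handle_bytes
 * and handle_type byte-swapped, and store the mount id at mount_id.
 */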
7806 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7807                                      abi_long handle, abi_long mount_id,
7808                                      abi_long flags)
7809 {
7810     struct file_handle *target_fh;
7811     struct file_handle *fh;
7812     int mid = 0;
7813     abi_long ret;
7814     char *name;
7815     unsigned int size, total_size;
7816 
7817     if (get_user_s32(size, handle)) {
7818         return -TARGET_EFAULT;
7819     }
7820 
7821     name = lock_user_string(pathname);
7822     if (!name) {
7823         return -TARGET_EFAULT;
7824     }
7825 
7826     total_size = sizeof(struct file_handle) + size;
7827     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7828     if (!target_fh) {
7829         unlock_user(name, pathname, 0);
7830         return -TARGET_EFAULT;
7831     }
7832 
7833     fh = g_malloc0(total_size);
7834     fh->handle_bytes = size;
7835 
7836     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7837     unlock_user(name, pathname, 0);
7838 
7839     /* man name_to_handle_at(2):
7840      * Other than the use of the handle_bytes field, the caller should treat
7841      * the file_handle structure as an opaque data type
7842      */
7843 
7844     memcpy(target_fh, fh, total_size);
7845     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7846     target_fh->handle_type = tswap32(fh->handle_type);
7847     g_free(fh);
7848     unlock_user(target_fh, handle, total_size);
7849 
7850     if (put_user_s32(mid, mount_id)) {
7851         return -TARGET_EFAULT;
7852     }
7853 
7854     return ret;
7855 
7856 }
7857 #endif
7858 
7859 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
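/*
 * Emulate open_by_handle_at(): duplicate the guest struct file_handle
 * into host memory, fix up handle_bytes and handle_type byte order,
 * translate the open flags and pass everything to the host syscall.
 */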
7860 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7861                                      abi_long flags)
7862 {
7863     struct file_handle *target_fh;
7864     struct file_handle *fh;
7865     unsigned int size, total_size;
7866     abi_long ret;
7867 
7868     if (get_user_s32(size, handle)) {
7869         return -TARGET_EFAULT;
7870     }
7871 
7872     total_size = sizeof(struct file_handle) + size;
7873     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7874     if (!target_fh) {
7875         return -TARGET_EFAULT;
7876     }
7877 
7878     fh = g_memdup(target_fh, total_size);
7879     fh->handle_bytes = size;
7880     fh->handle_type = tswap32(target_fh->handle_type);
7881 
7882     ret = get_errno(open_by_handle_at(mount_fd, fh,
7883                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7884 
7885     g_free(fh);
7886 
7887     unlock_user(target_fh, handle, total_size);
7888 
7889     return ret;
7890 }
7891 #endif
7892 
7893 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7894 
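/*
 * Common helper for signalfd() and signalfd4(): validate the flags,
 * convert the guest signal mask and flag bits to host values, create the
 * signalfd and register an fd translator so that signalfd_siginfo records
 * read from it are converted back to the target layout.
 */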
7895 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7896 {
7897     int host_flags;
7898     target_sigset_t *target_mask;
7899     sigset_t host_mask;
7900     abi_long ret;
7901 
7902     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7903         return -TARGET_EINVAL;
7904     }
7905     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7906         return -TARGET_EFAULT;
7907     }
7908 
7909     target_to_host_sigset(&host_mask, target_mask);
7910 
7911     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7912 
7913     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7914     if (ret >= 0) {
7915         fd_trans_register(ret, &target_signalfd_trans);
7916     }
7917 
7918     unlock_user_struct(target_mask, mask, 0);
7919 
7920     return ret;
7921 }
7922 #endif
7923 
7924 /* Map host to target signal numbers for the wait family of syscalls.
7925    Assume all other status bits are the same.  */
7926 int host_to_target_waitstatus(int status)
7927 {
7928     if (WIFSIGNALED(status)) {
7929         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7930     }
7931     if (WIFSTOPPED(status)) {
7932         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7933                | (status & 0xff);
7934     }
7935     return status;
7936 }
7937 
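/* Fake /proc/self/cmdline by writing the guest's argv[] strings,
   each with its trailing NUL, to fd.  */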
7938 static int open_self_cmdline(void *cpu_env, int fd)
7939 {
7940     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7941     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7942     int i;
7943 
7944     for (i = 0; i < bprm->argc; i++) {
7945         size_t len = strlen(bprm->argv[i]) + 1;
7946 
7947         if (write(fd, bprm->argv[i], len) != len) {
7948             return -1;
7949         }
7950     }
7951 
7952     return 0;
7953 }
7954 
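/*
 * Fake /proc/self/maps: walk the host's own mappings and emit only the
 * ranges that are valid guest addresses, translated to guest addresses
 * and guest page protections, e.g.
 *     40000000-40094000 r-xp 00000000 08:02 123456    /lib/ld-linux.so.3
 * (illustrative values only).  The target's vsyscall page, where one
 * exists, is appended at the end.
 */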
7955 static int open_self_maps(void *cpu_env, int fd)
7956 {
7957     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7958     TaskState *ts = cpu->opaque;
7959     GSList *map_info = read_self_maps();
7960     GSList *s;
7961     int count;
7962 
7963     for (s = map_info; s; s = g_slist_next(s)) {
7964         MapInfo *e = (MapInfo *) s->data;
7965 
7966         if (h2g_valid(e->start)) {
7967             unsigned long min = e->start;
7968             unsigned long max = e->end;
7969             int flags = page_get_flags(h2g(min));
7970             const char *path;
7971 
7972             max = h2g_valid(max - 1) ?
7973                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7974 
7975             if (page_check_range(h2g(min), max - min, flags) == -1) {
7976                 continue;
7977             }
7978 
7979             if (h2g(min) == ts->info->stack_limit) {
7980                 path = "[stack]";
7981             } else {
7982                 path = e->path;
7983             }
7984 
7985             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7986                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7987                             h2g(min), h2g(max - 1) + 1,
7988                             (flags & PAGE_READ) ? 'r' : '-',
7989                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7990                             (flags & PAGE_EXEC) ? 'x' : '-',
7991                             e->is_priv ? 'p' : '-',
7992                             (uint64_t) e->offset, e->dev, e->inode);
7993             if (path) {
7994                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7995             } else {
7996                 dprintf(fd, "\n");
7997             }
7998         }
7999     }
8000 
8001     free_self_maps(map_info);
8002 
8003 #ifdef TARGET_VSYSCALL_PAGE
8004     /*
8005      * We only support execution from the vsyscall page.
8006      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8007      */
8008     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8009                     " --xp 00000000 00:00 0",
8010                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8011     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8012 #endif
8013 
8014     return 0;
8015 }
8016 
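/*
 * Fake /proc/self/stat: only the fields a guest typically inspects
 * (pid, comm, ppid and the start-of-stack address) carry real values;
 * every other field is written as 0.
 */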
8017 static int open_self_stat(void *cpu_env, int fd)
8018 {
8019     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8020     TaskState *ts = cpu->opaque;
8021     g_autoptr(GString) buf = g_string_new(NULL);
8022     int i;
8023 
8024     for (i = 0; i < 44; i++) {
8025         if (i == 0) {
8026             /* pid */
8027             g_string_printf(buf, FMT_pid " ", getpid());
8028         } else if (i == 1) {
8029             /* app name */
8030             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8031             bin = bin ? bin + 1 : ts->bprm->argv[0];
8032             g_string_printf(buf, "(%.15s) ", bin);
8033         } else if (i == 3) {
8034             /* ppid */
8035             g_string_printf(buf, FMT_pid " ", getppid());
8036         } else if (i == 27) {
8037             /* stack bottom */
8038             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8039         } else {
8040             /* every other field is simply reported as zero */
8041             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8042         }
8043 
8044         if (write(fd, buf->str, buf->len) != buf->len) {
8045             return -1;
8046         }
8047     }
8048 
8049     return 0;
8050 }
8051 
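/* Fake /proc/self/auxv by copying the auxiliary vector that was placed
   on the guest stack at exec time out to fd.  */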
8052 static int open_self_auxv(void *cpu_env, int fd)
8053 {
8054     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8055     TaskState *ts = cpu->opaque;
8056     abi_ulong auxv = ts->info->saved_auxv;
8057     abi_ulong len = ts->info->auxv_len;
8058     char *ptr;
8059 
8060     /*
8061      * The auxiliary vector is stored on the target process's stack;
8062      * read in the whole auxv vector and copy it out to the file.
8063      */
8064     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8065     if (ptr != NULL) {
8066         while (len > 0) {
8067             ssize_t r;
8068             r = write(fd, ptr, len);
8069             if (r <= 0) {
8070                 break;
8071             }
8072             len -= r;
8073             ptr += r;
8074         }
8075         lseek(fd, 0, SEEK_SET);
8076         unlock_user(ptr, auxv, len);
8077     }
8078 
8079     return 0;
8080 }
8081 
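/*
 * Return 1 if filename names the given entry of this process's own
 * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>",
 * for example is_proc_myself("/proc/self/maps", "maps").
 */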
8082 static int is_proc_myself(const char *filename, const char *entry)
8083 {
8084     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8085         filename += strlen("/proc/");
8086         if (!strncmp(filename, "self/", strlen("self/"))) {
8087             filename += strlen("self/");
8088         } else if (*filename >= '1' && *filename <= '9') {
8089             char myself[80];
8090             snprintf(myself, sizeof(myself), "%d/", getpid());
8091             if (!strncmp(filename, myself, strlen(myself))) {
8092                 filename += strlen(myself);
8093             } else {
8094                 return 0;
8095             }
8096         } else {
8097             return 0;
8098         }
8099         if (!strcmp(filename, entry)) {
8100             return 1;
8101         }
8102     }
8103     return 0;
8104 }
8105 
8106 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8107     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8108 static int is_proc(const char *filename, const char *entry)
8109 {
8110     return strcmp(filename, entry) == 0;
8111 }
8112 #endif
8113 
8114 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
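/*
 * Fake /proc/net/route for cross-endian configurations: copy the host
 * file through while byte-swapping the destination, gateway and mask
 * columns so that they match what a kernel of the guest's endianness
 * would print.
 */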
8115 static int open_net_route(void *cpu_env, int fd)
8116 {
8117     FILE *fp;
8118     char *line = NULL;
8119     size_t len = 0;
8120     ssize_t read;
8121 
8122     fp = fopen("/proc/net/route", "r");
8123     if (fp == NULL) {
8124         return -1;
8125     }
8126 
8127     /* read header */
8128 
8129     read = getline(&line, &len, fp);
8130     dprintf(fd, "%s", line);
8131 
8132     /* read routes */
8133 
8134     while ((read = getline(&line, &len, fp)) != -1) {
8135         char iface[16];
8136         uint32_t dest, gw, mask;
8137         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8138         int fields;
8139 
8140         fields = sscanf(line,
8141                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8142                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8143                         &mask, &mtu, &window, &irtt);
8144         if (fields != 11) {
8145             continue;
8146         }
8147         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8148                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8149                 metric, tswap32(mask), mtu, window, irtt);
8150     }
8151 
8152     free(line);
8153     fclose(fp);
8154 
8155     return 0;
8156 }
8157 #endif
8158 
8159 #if defined(TARGET_SPARC)
8160 static int open_cpuinfo(void *cpu_env, int fd)
8161 {
8162     dprintf(fd, "type\t\t: sun4u\n");
8163     return 0;
8164 }
8165 #endif
8166 
8167 #if defined(TARGET_HPPA)
8168 static int open_cpuinfo(void *cpu_env, int fd)
8169 {
8170     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8171     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8172     dprintf(fd, "capabilities\t: os32\n");
8173     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8174     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8175     return 0;
8176 }
8177 #endif
8178 
8179 #if defined(TARGET_M68K)
8180 static int open_hardware(void *cpu_env, int fd)
8181 {
8182     dprintf(fd, "Model:\t\tqemu-m68k\n");
8183     return 0;
8184 }
8185 #endif
8186 
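/*
 * openat() helper: the guest's view of its own /proc entries (and a few
 * whole-of-/proc files on selected targets) is emulated by synthesizing
 * the contents into an unlinked temporary file; /proc/self/exe is
 * redirected to the real guest executable; everything else goes to the
 * host via safe_openat().
 */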
8187 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8188 {
8189     struct fake_open {
8190         const char *filename;
8191         int (*fill)(void *cpu_env, int fd);
8192         int (*cmp)(const char *s1, const char *s2);
8193     };
8194     const struct fake_open *fake_open;
8195     static const struct fake_open fakes[] = {
8196         { "maps", open_self_maps, is_proc_myself },
8197         { "stat", open_self_stat, is_proc_myself },
8198         { "auxv", open_self_auxv, is_proc_myself },
8199         { "cmdline", open_self_cmdline, is_proc_myself },
8200 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8201         { "/proc/net/route", open_net_route, is_proc },
8202 #endif
8203 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8204         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8205 #endif
8206 #if defined(TARGET_M68K)
8207         { "/proc/hardware", open_hardware, is_proc },
8208 #endif
8209         { NULL, NULL, NULL }
8210     };
8211 
8212     if (is_proc_myself(pathname, "exe")) {
8213         int execfd = qemu_getauxval(AT_EXECFD);
8214         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8215     }
8216 
8217     for (fake_open = fakes; fake_open->filename; fake_open++) {
8218         if (fake_open->cmp(pathname, fake_open->filename)) {
8219             break;
8220         }
8221     }
8222 
8223     if (fake_open->filename) {
8224         const char *tmpdir;
8225         char filename[PATH_MAX];
8226         int fd, r;
8227 
8228         /* create a temporary file to hold the synthesized contents */
8229         tmpdir = getenv("TMPDIR");
8230         if (!tmpdir)
8231             tmpdir = "/tmp";
8232         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8233         fd = mkstemp(filename);
8234         if (fd < 0) {
8235             return fd;
8236         }
8237         unlink(filename);
8238 
8239         if ((r = fake_open->fill(cpu_env, fd))) {
8240             int e = errno;
8241             close(fd);
8242             errno = e;
8243             return r;
8244         }
8245         lseek(fd, 0, SEEK_SET);
8246 
8247         return fd;
8248     }
8249 
8250     return safe_openat(dirfd, path(pathname), flags, mode);
8251 }
8252 
8253 #define TIMER_MAGIC 0x0caf0000
8254 #define TIMER_MAGIC_MASK 0xffff0000
8255 
8256 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8257 static target_timer_t get_timer_id(abi_long arg)
8258 {
8259     target_timer_t timerid = arg;
8260 
8261     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8262         return -TARGET_EINVAL;
8263     }
8264 
8265     timerid &= 0xffff;
8266 
8267     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8268         return -TARGET_EINVAL;
8269     }
8270 
8271     return timerid;
8272 }
8273 
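/*
 * Convert a CPU affinity bitmap from the guest's abi_ulong layout to the
 * host's unsigned long layout, bit by bit; host_to_target_cpu_mask()
 * below performs the reverse conversion.  The host buffer must be at
 * least as large as the guest one.
 */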
8274 static int target_to_host_cpu_mask(unsigned long *host_mask,
8275                                    size_t host_size,
8276                                    abi_ulong target_addr,
8277                                    size_t target_size)
8278 {
8279     unsigned target_bits = sizeof(abi_ulong) * 8;
8280     unsigned host_bits = sizeof(*host_mask) * 8;
8281     abi_ulong *target_mask;
8282     unsigned i, j;
8283 
8284     assert(host_size >= target_size);
8285 
8286     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8287     if (!target_mask) {
8288         return -TARGET_EFAULT;
8289     }
8290     memset(host_mask, 0, host_size);
8291 
8292     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8293         unsigned bit = i * target_bits;
8294         abi_ulong val;
8295 
8296         __get_user(val, &target_mask[i]);
8297         for (j = 0; j < target_bits; j++, bit++) {
8298             if (val & (1UL << j)) {
8299                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8300             }
8301         }
8302     }
8303 
8304     unlock_user(target_mask, target_addr, 0);
8305     return 0;
8306 }
8307 
8308 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8309                                    size_t host_size,
8310                                    abi_ulong target_addr,
8311                                    size_t target_size)
8312 {
8313     unsigned target_bits = sizeof(abi_ulong) * 8;
8314     unsigned host_bits = sizeof(*host_mask) * 8;
8315     abi_ulong *target_mask;
8316     unsigned i, j;
8317 
8318     assert(host_size >= target_size);
8319 
8320     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8321     if (!target_mask) {
8322         return -TARGET_EFAULT;
8323     }
8324 
8325     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8326         unsigned bit = i * target_bits;
8327         abi_ulong val = 0;
8328 
8329         for (j = 0; j < target_bits; j++, bit++) {
8330             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8331                 val |= 1UL << j;
8332             }
8333         }
8334         __put_user(val, &target_mask[i]);
8335     }
8336 
8337     unlock_user(target_mask, target_addr, target_size);
8338     return 0;
8339 }
8340 
8341 #ifdef TARGET_NR_getdents
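/*
 * Emulate getdents(): read host directory entries (with sys_getdents or
 * sys_getdents64, depending on EMULATE_GETDENTS_WITH_GETDENTS), then
 * repack each record into the target's struct target_dirent layout.
 * If the guest buffer fills up before the host results are exhausted,
 * the directory offset is rewound to the first record not returned.
 */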
8342 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8343 {
8344     g_autofree void *hdirp = NULL;
8345     void *tdirp;
8346     int hlen, hoff, toff;
8347     int hreclen, treclen;
8348     off64_t prev_diroff = 0;
8349 
8350     hdirp = g_try_malloc(count);
8351     if (!hdirp) {
8352         return -TARGET_ENOMEM;
8353     }
8354 
8355 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8356     hlen = sys_getdents(dirfd, hdirp, count);
8357 #else
8358     hlen = sys_getdents64(dirfd, hdirp, count);
8359 #endif
8360 
8361     hlen = get_errno(hlen);
8362     if (is_error(hlen)) {
8363         return hlen;
8364     }
8365 
8366     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8367     if (!tdirp) {
8368         return -TARGET_EFAULT;
8369     }
8370 
8371     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8372 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8373         struct linux_dirent *hde = hdirp + hoff;
8374 #else
8375         struct linux_dirent64 *hde = hdirp + hoff;
8376 #endif
8377         struct target_dirent *tde = tdirp + toff;
8378         int namelen;
8379         uint8_t type;
8380 
8381         namelen = strlen(hde->d_name);
8382         hreclen = hde->d_reclen;
8383         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8384         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8385 
8386         if (toff + treclen > count) {
8387             /*
8388              * If the host struct is smaller than the target struct, or
8389              * requires less alignment and thus packs into less space,
8390              * then the host can return more entries than we can pass
8391              * on to the guest.
8392              */
8393             if (toff == 0) {
8394                 toff = -TARGET_EINVAL; /* result buffer is too small */
8395                 break;
8396             }
8397             /*
8398              * Return what we have, resetting the file pointer to the
8399              * location of the first record not returned.
8400              */
8401             lseek64(dirfd, prev_diroff, SEEK_SET);
8402             break;
8403         }
8404 
8405         prev_diroff = hde->d_off;
8406         tde->d_ino = tswapal(hde->d_ino);
8407         tde->d_off = tswapal(hde->d_off);
8408         tde->d_reclen = tswap16(treclen);
8409         memcpy(tde->d_name, hde->d_name, namelen + 1);
8410 
8411         /*
8412          * The getdents type is in what was formerly a padding byte at the
8413          * end of the structure.
8414          */
8415 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8416         type = *((uint8_t *)hde + hreclen - 1);
8417 #else
8418         type = hde->d_type;
8419 #endif
8420         *((uint8_t *)tde + treclen - 1) = type;
8421     }
8422 
8423     unlock_user(tdirp, arg2, toff);
8424     return toff;
8425 }
8426 #endif /* TARGET_NR_getdents */
8427 
8428 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
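/*
 * Emulate getdents64(): the same repacking as do_getdents(), but host
 * and target both use the 64-bit dirent layout, so only byte order,
 * record length and alignment need adjusting.
 */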
8429 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8430 {
8431     g_autofree void *hdirp = NULL;
8432     void *tdirp;
8433     int hlen, hoff, toff;
8434     int hreclen, treclen;
8435     off64_t prev_diroff = 0;
8436 
8437     hdirp = g_try_malloc(count);
8438     if (!hdirp) {
8439         return -TARGET_ENOMEM;
8440     }
8441 
8442     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8443     if (is_error(hlen)) {
8444         return hlen;
8445     }
8446 
8447     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8448     if (!tdirp) {
8449         return -TARGET_EFAULT;
8450     }
8451 
8452     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8453         struct linux_dirent64 *hde = hdirp + hoff;
8454         struct target_dirent64 *tde = tdirp + toff;
8455         int namelen;
8456 
8457         namelen = strlen(hde->d_name) + 1;
8458         hreclen = hde->d_reclen;
8459         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8460         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8461 
8462         if (toff + treclen > count) {
8463             /*
8464              * If the host struct is smaller than the target struct, or
8465              * requires less alignment and thus packs into less space,
8466              * then the host can return more entries than we can pass
8467              * on to the guest.
8468              */
8469             if (toff == 0) {
8470                 toff = -TARGET_EINVAL; /* result buffer is too small */
8471                 break;
8472             }
8473             /*
8474              * Return what we have, resetting the file pointer to the
8475              * location of the first record not returned.
8476              */
8477             lseek64(dirfd, prev_diroff, SEEK_SET);
8478             break;
8479         }
8480 
8481         prev_diroff = hde->d_off;
8482         tde->d_ino = tswap64(hde->d_ino);
8483         tde->d_off = tswap64(hde->d_off);
8484         tde->d_reclen = tswap16(treclen);
8485         tde->d_type = hde->d_type;
8486         memcpy(tde->d_name, hde->d_name, namelen);
8487     }
8488 
8489     unlock_user(tdirp, arg2, toff);
8490     return toff;
8491 }
8492 #endif /* TARGET_NR_getdents64 */
8493 
8494 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8495 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8496 #endif
8497 
8498 /* This is an internal helper for do_syscall so that it is easier
8499  * to have a single return point, which in turn lets actions such as
8500  * logging of syscall results be performed in one place.
8501  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8502  */
8503 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8504                             abi_long arg2, abi_long arg3, abi_long arg4,
8505                             abi_long arg5, abi_long arg6, abi_long arg7,
8506                             abi_long arg8)
8507 {
8508     CPUState *cpu = env_cpu(cpu_env);
8509     abi_long ret;
8510 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8511     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8512     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8513     || defined(TARGET_NR_statx)
8514     struct stat st;
8515 #endif
8516 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8517     || defined(TARGET_NR_fstatfs)
8518     struct statfs stfs;
8519 #endif
8520     void *p;
8521 
8522     switch(num) {
8523     case TARGET_NR_exit:
8524         /* In old applications this may be used to implement _exit(2).
8525            However, in threaded applications it is used for thread termination,
8526            and _exit_group is used for application termination.
8527            Do thread termination if we have more than one thread.  */
8528 
8529         if (block_signals()) {
8530             return -QEMU_ERESTARTSYS;
8531         }
8532 
8533         pthread_mutex_lock(&clone_lock);
8534 
8535         if (CPU_NEXT(first_cpu)) {
8536             TaskState *ts = cpu->opaque;
8537 
8538             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8539             object_unref(OBJECT(cpu));
8540             /*
8541              * At this point the CPU should be unrealized and removed
8542              * from cpu lists. We can clean up the rest of the thread
8543              * data without the lock held.
8544              */
8545 
8546             pthread_mutex_unlock(&clone_lock);
8547 
8548             if (ts->child_tidptr) {
8549                 put_user_u32(0, ts->child_tidptr);
8550                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8551                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8552             }
8553             thread_cpu = NULL;
8554             g_free(ts);
8555             rcu_unregister_thread();
8556             pthread_exit(NULL);
8557         }
8558 
8559         pthread_mutex_unlock(&clone_lock);
8560         preexit_cleanup(cpu_env, arg1);
8561         _exit(arg1);
8562         return 0; /* avoid warning */
8563     case TARGET_NR_read:
8564         if (arg2 == 0 && arg3 == 0) {
8565             return get_errno(safe_read(arg1, 0, 0));
8566         } else {
8567             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8568                 return -TARGET_EFAULT;
8569             ret = get_errno(safe_read(arg1, p, arg3));
8570             if (ret >= 0 &&
8571                 fd_trans_host_to_target_data(arg1)) {
8572                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8573             }
8574             unlock_user(p, arg2, ret);
8575         }
8576         return ret;
8577     case TARGET_NR_write:
8578         if (arg2 == 0 && arg3 == 0) {
8579             return get_errno(safe_write(arg1, 0, 0));
8580         }
8581         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8582             return -TARGET_EFAULT;
8583         if (fd_trans_target_to_host_data(arg1)) {
8584             void *copy = g_malloc(arg3);
8585             memcpy(copy, p, arg3);
8586             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8587             if (ret >= 0) {
8588                 ret = get_errno(safe_write(arg1, copy, ret));
8589             }
8590             g_free(copy);
8591         } else {
8592             ret = get_errno(safe_write(arg1, p, arg3));
8593         }
8594         unlock_user(p, arg2, 0);
8595         return ret;
8596 
8597 #ifdef TARGET_NR_open
8598     case TARGET_NR_open:
8599         if (!(p = lock_user_string(arg1)))
8600             return -TARGET_EFAULT;
8601         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8602                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8603                                   arg3));
8604         fd_trans_unregister(ret);
8605         unlock_user(p, arg1, 0);
8606         return ret;
8607 #endif
8608     case TARGET_NR_openat:
8609         if (!(p = lock_user_string(arg2)))
8610             return -TARGET_EFAULT;
8611         ret = get_errno(do_openat(cpu_env, arg1, p,
8612                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8613                                   arg4));
8614         fd_trans_unregister(ret);
8615         unlock_user(p, arg2, 0);
8616         return ret;
8617 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8618     case TARGET_NR_name_to_handle_at:
8619         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8620         return ret;
8621 #endif
8622 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8623     case TARGET_NR_open_by_handle_at:
8624         ret = do_open_by_handle_at(arg1, arg2, arg3);
8625         fd_trans_unregister(ret);
8626         return ret;
8627 #endif
8628     case TARGET_NR_close:
8629         fd_trans_unregister(arg1);
8630         return get_errno(close(arg1));
8631 
8632     case TARGET_NR_brk:
8633         return do_brk(arg1);
8634 #ifdef TARGET_NR_fork
8635     case TARGET_NR_fork:
8636         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8637 #endif
8638 #ifdef TARGET_NR_waitpid
8639     case TARGET_NR_waitpid:
8640         {
8641             int status;
8642             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8643             if (!is_error(ret) && arg2 && ret
8644                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8645                 return -TARGET_EFAULT;
8646         }
8647         return ret;
8648 #endif
8649 #ifdef TARGET_NR_waitid
8650     case TARGET_NR_waitid:
8651         {
8652             siginfo_t info;
8653             info.si_pid = 0;
8654             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8655             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8656                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8657                     return -TARGET_EFAULT;
8658                 host_to_target_siginfo(p, &info);
8659                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8660             }
8661         }
8662         return ret;
8663 #endif
8664 #ifdef TARGET_NR_creat /* not on alpha */
8665     case TARGET_NR_creat:
8666         if (!(p = lock_user_string(arg1)))
8667             return -TARGET_EFAULT;
8668         ret = get_errno(creat(p, arg2));
8669         fd_trans_unregister(ret);
8670         unlock_user(p, arg1, 0);
8671         return ret;
8672 #endif
8673 #ifdef TARGET_NR_link
8674     case TARGET_NR_link:
8675         {
8676             void * p2;
8677             p = lock_user_string(arg1);
8678             p2 = lock_user_string(arg2);
8679             if (!p || !p2)
8680                 ret = -TARGET_EFAULT;
8681             else
8682                 ret = get_errno(link(p, p2));
8683             unlock_user(p2, arg2, 0);
8684             unlock_user(p, arg1, 0);
8685         }
8686         return ret;
8687 #endif
8688 #if defined(TARGET_NR_linkat)
8689     case TARGET_NR_linkat:
8690         {
8691             void * p2 = NULL;
8692             if (!arg2 || !arg4)
8693                 return -TARGET_EFAULT;
8694             p  = lock_user_string(arg2);
8695             p2 = lock_user_string(arg4);
8696             if (!p || !p2)
8697                 ret = -TARGET_EFAULT;
8698             else
8699                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8700             unlock_user(p, arg2, 0);
8701             unlock_user(p2, arg4, 0);
8702         }
8703         return ret;
8704 #endif
8705 #ifdef TARGET_NR_unlink
8706     case TARGET_NR_unlink:
8707         if (!(p = lock_user_string(arg1)))
8708             return -TARGET_EFAULT;
8709         ret = get_errno(unlink(p));
8710         unlock_user(p, arg1, 0);
8711         return ret;
8712 #endif
8713 #if defined(TARGET_NR_unlinkat)
8714     case TARGET_NR_unlinkat:
8715         if (!(p = lock_user_string(arg2)))
8716             return -TARGET_EFAULT;
8717         ret = get_errno(unlinkat(arg1, p, arg3));
8718         unlock_user(p, arg2, 0);
8719         return ret;
8720 #endif
8721     case TARGET_NR_execve:
8722         {
8723             char **argp, **envp;
8724             int argc, envc;
8725             abi_ulong gp;
8726             abi_ulong guest_argp;
8727             abi_ulong guest_envp;
8728             abi_ulong addr;
8729             char **q;
8730 
8731             argc = 0;
8732             guest_argp = arg2;
8733             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8734                 if (get_user_ual(addr, gp))
8735                     return -TARGET_EFAULT;
8736                 if (!addr)
8737                     break;
8738                 argc++;
8739             }
8740             envc = 0;
8741             guest_envp = arg3;
8742             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8743                 if (get_user_ual(addr, gp))
8744                     return -TARGET_EFAULT;
8745                 if (!addr)
8746                     break;
8747                 envc++;
8748             }
8749 
8750             argp = g_new0(char *, argc + 1);
8751             envp = g_new0(char *, envc + 1);
8752 
8753             for (gp = guest_argp, q = argp; gp;
8754                   gp += sizeof(abi_ulong), q++) {
8755                 if (get_user_ual(addr, gp))
8756                     goto execve_efault;
8757                 if (!addr)
8758                     break;
8759                 if (!(*q = lock_user_string(addr)))
8760                     goto execve_efault;
8761             }
8762             *q = NULL;
8763 
8764             for (gp = guest_envp, q = envp; gp;
8765                   gp += sizeof(abi_ulong), q++) {
8766                 if (get_user_ual(addr, gp))
8767                     goto execve_efault;
8768                 if (!addr)
8769                     break;
8770                 if (!(*q = lock_user_string(addr)))
8771                     goto execve_efault;
8772             }
8773             *q = NULL;
8774 
8775             if (!(p = lock_user_string(arg1)))
8776                 goto execve_efault;
8777             /* Although execve() is not an interruptible syscall, it is
8778              * a special case where we must use the safe_syscall wrapper:
8779              * if we allow a signal to happen before we make the host
8780              * syscall then we will 'lose' it, because at the point of
8781              * execve the process leaves QEMU's control. So we use the
8782              * safe syscall wrapper to ensure that we either take the
8783              * signal as a guest signal, or else it does not happen
8784              * before the execve completes and makes it the other
8785              * program's problem.
8786              */
8787             ret = get_errno(safe_execve(p, argp, envp));
8788             unlock_user(p, arg1, 0);
8789 
8790             goto execve_end;
8791 
8792         execve_efault:
8793             ret = -TARGET_EFAULT;
8794 
8795         execve_end:
8796             for (gp = guest_argp, q = argp; *q;
8797                   gp += sizeof(abi_ulong), q++) {
8798                 if (get_user_ual(addr, gp)
8799                     || !addr)
8800                     break;
8801                 unlock_user(*q, addr, 0);
8802             }
8803             for (gp = guest_envp, q = envp; *q;
8804                   gp += sizeof(abi_ulong), q++) {
8805                 if (get_user_ual(addr, gp)
8806                     || !addr)
8807                     break;
8808                 unlock_user(*q, addr, 0);
8809             }
8810 
8811             g_free(argp);
8812             g_free(envp);
8813         }
8814         return ret;
8815     case TARGET_NR_chdir:
8816         if (!(p = lock_user_string(arg1)))
8817             return -TARGET_EFAULT;
8818         ret = get_errno(chdir(p));
8819         unlock_user(p, arg1, 0);
8820         return ret;
8821 #ifdef TARGET_NR_time
8822     case TARGET_NR_time:
8823         {
8824             time_t host_time;
8825             ret = get_errno(time(&host_time));
8826             if (!is_error(ret)
8827                 && arg1
8828                 && put_user_sal(host_time, arg1))
8829                 return -TARGET_EFAULT;
8830         }
8831         return ret;
8832 #endif
8833 #ifdef TARGET_NR_mknod
8834     case TARGET_NR_mknod:
8835         if (!(p = lock_user_string(arg1)))
8836             return -TARGET_EFAULT;
8837         ret = get_errno(mknod(p, arg2, arg3));
8838         unlock_user(p, arg1, 0);
8839         return ret;
8840 #endif
8841 #if defined(TARGET_NR_mknodat)
8842     case TARGET_NR_mknodat:
8843         if (!(p = lock_user_string(arg2)))
8844             return -TARGET_EFAULT;
8845         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8846         unlock_user(p, arg2, 0);
8847         return ret;
8848 #endif
8849 #ifdef TARGET_NR_chmod
8850     case TARGET_NR_chmod:
8851         if (!(p = lock_user_string(arg1)))
8852             return -TARGET_EFAULT;
8853         ret = get_errno(chmod(p, arg2));
8854         unlock_user(p, arg1, 0);
8855         return ret;
8856 #endif
8857 #ifdef TARGET_NR_lseek
8858     case TARGET_NR_lseek:
8859         return get_errno(lseek(arg1, arg2, arg3));
8860 #endif
8861 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8862     /* Alpha specific */
8863     case TARGET_NR_getxpid:
8864         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8865         return get_errno(getpid());
8866 #endif
8867 #ifdef TARGET_NR_getpid
8868     case TARGET_NR_getpid:
8869         return get_errno(getpid());
8870 #endif
8871     case TARGET_NR_mount:
8872         {
8873             /* need to look at the data field */
8874             void *p2, *p3;
8875 
8876             if (arg1) {
8877                 p = lock_user_string(arg1);
8878                 if (!p) {
8879                     return -TARGET_EFAULT;
8880                 }
8881             } else {
8882                 p = NULL;
8883             }
8884 
8885             p2 = lock_user_string(arg2);
8886             if (!p2) {
8887                 if (arg1) {
8888                     unlock_user(p, arg1, 0);
8889                 }
8890                 return -TARGET_EFAULT;
8891             }
8892 
8893             if (arg3) {
8894                 p3 = lock_user_string(arg3);
8895                 if (!p3) {
8896                     if (arg1) {
8897                         unlock_user(p, arg1, 0);
8898                     }
8899                     unlock_user(p2, arg2, 0);
8900                     return -TARGET_EFAULT;
8901                 }
8902             } else {
8903                 p3 = NULL;
8904             }
8905 
8906             /* FIXME - arg5 should be locked, but it isn't clear how to
8907              * do that since it's not guaranteed to be a NULL-terminated
8908              * string.
8909              */
8910             if (!arg5) {
8911                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8912             } else {
8913                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8914             }
8915             ret = get_errno(ret);
8916 
8917             if (arg1) {
8918                 unlock_user(p, arg1, 0);
8919             }
8920             unlock_user(p2, arg2, 0);
8921             if (arg3) {
8922                 unlock_user(p3, arg3, 0);
8923             }
8924         }
8925         return ret;
8926 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8927 #if defined(TARGET_NR_umount)
8928     case TARGET_NR_umount:
8929 #endif
8930 #if defined(TARGET_NR_oldumount)
8931     case TARGET_NR_oldumount:
8932 #endif
8933         if (!(p = lock_user_string(arg1)))
8934             return -TARGET_EFAULT;
8935         ret = get_errno(umount(p));
8936         unlock_user(p, arg1, 0);
8937         return ret;
8938 #endif
8939 #ifdef TARGET_NR_stime /* not on alpha */
8940     case TARGET_NR_stime:
8941         {
8942             struct timespec ts;
8943             ts.tv_nsec = 0;
8944             if (get_user_sal(ts.tv_sec, arg1)) {
8945                 return -TARGET_EFAULT;
8946             }
8947             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8948         }
8949 #endif
8950 #ifdef TARGET_NR_alarm /* not on alpha */
8951     case TARGET_NR_alarm:
8952         return alarm(arg1);
8953 #endif
8954 #ifdef TARGET_NR_pause /* not on alpha */
8955     case TARGET_NR_pause:
8956         if (!block_signals()) {
8957             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8958         }
8959         return -TARGET_EINTR;
8960 #endif
8961 #ifdef TARGET_NR_utime
8962     case TARGET_NR_utime:
8963         {
8964             struct utimbuf tbuf, *host_tbuf;
8965             struct target_utimbuf *target_tbuf;
8966             if (arg2) {
8967                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8968                     return -TARGET_EFAULT;
8969                 tbuf.actime = tswapal(target_tbuf->actime);
8970                 tbuf.modtime = tswapal(target_tbuf->modtime);
8971                 unlock_user_struct(target_tbuf, arg2, 0);
8972                 host_tbuf = &tbuf;
8973             } else {
8974                 host_tbuf = NULL;
8975             }
8976             if (!(p = lock_user_string(arg1)))
8977                 return -TARGET_EFAULT;
8978             ret = get_errno(utime(p, host_tbuf));
8979             unlock_user(p, arg1, 0);
8980         }
8981         return ret;
8982 #endif
8983 #ifdef TARGET_NR_utimes
8984     case TARGET_NR_utimes:
8985         {
8986             struct timeval *tvp, tv[2];
8987             if (arg2) {
8988                 if (copy_from_user_timeval(&tv[0], arg2)
8989                     || copy_from_user_timeval(&tv[1],
8990                                               arg2 + sizeof(struct target_timeval)))
8991                     return -TARGET_EFAULT;
8992                 tvp = tv;
8993             } else {
8994                 tvp = NULL;
8995             }
8996             if (!(p = lock_user_string(arg1)))
8997                 return -TARGET_EFAULT;
8998             ret = get_errno(utimes(p, tvp));
8999             unlock_user(p, arg1, 0);
9000         }
9001         return ret;
9002 #endif
9003 #if defined(TARGET_NR_futimesat)
9004     case TARGET_NR_futimesat:
9005         {
9006             struct timeval *tvp, tv[2];
9007             if (arg3) {
9008                 if (copy_from_user_timeval(&tv[0], arg3)
9009                     || copy_from_user_timeval(&tv[1],
9010                                               arg3 + sizeof(struct target_timeval)))
9011                     return -TARGET_EFAULT;
9012                 tvp = tv;
9013             } else {
9014                 tvp = NULL;
9015             }
9016             if (!(p = lock_user_string(arg2))) {
9017                 return -TARGET_EFAULT;
9018             }
9019             ret = get_errno(futimesat(arg1, path(p), tvp));
9020             unlock_user(p, arg2, 0);
9021         }
9022         return ret;
9023 #endif
9024 #ifdef TARGET_NR_access
9025     case TARGET_NR_access:
9026         if (!(p = lock_user_string(arg1))) {
9027             return -TARGET_EFAULT;
9028         }
9029         ret = get_errno(access(path(p), arg2));
9030         unlock_user(p, arg1, 0);
9031         return ret;
9032 #endif
9033 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9034     case TARGET_NR_faccessat:
9035         if (!(p = lock_user_string(arg2))) {
9036             return -TARGET_EFAULT;
9037         }
9038         ret = get_errno(faccessat(arg1, p, arg3, 0));
9039         unlock_user(p, arg2, 0);
9040         return ret;
9041 #endif
9042 #ifdef TARGET_NR_nice /* not on alpha */
9043     case TARGET_NR_nice:
9044         return get_errno(nice(arg1));
9045 #endif
9046     case TARGET_NR_sync:
9047         sync();
9048         return 0;
9049 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9050     case TARGET_NR_syncfs:
9051         return get_errno(syncfs(arg1));
9052 #endif
9053     case TARGET_NR_kill:
9054         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9055 #ifdef TARGET_NR_rename
9056     case TARGET_NR_rename:
9057         {
9058             void *p2;
9059             p = lock_user_string(arg1);
9060             p2 = lock_user_string(arg2);
9061             if (!p || !p2)
9062                 ret = -TARGET_EFAULT;
9063             else
9064                 ret = get_errno(rename(p, p2));
9065             unlock_user(p2, arg2, 0);
9066             unlock_user(p, arg1, 0);
9067         }
9068         return ret;
9069 #endif
9070 #if defined(TARGET_NR_renameat)
9071     case TARGET_NR_renameat:
9072         {
9073             void *p2;
9074             p  = lock_user_string(arg2);
9075             p2 = lock_user_string(arg4);
9076             if (!p || !p2)
9077                 ret = -TARGET_EFAULT;
9078             else
9079                 ret = get_errno(renameat(arg1, p, arg3, p2));
9080             unlock_user(p2, arg4, 0);
9081             unlock_user(p, arg2, 0);
9082         }
9083         return ret;
9084 #endif
9085 #if defined(TARGET_NR_renameat2)
9086     case TARGET_NR_renameat2:
9087         {
9088             void *p2;
9089             p  = lock_user_string(arg2);
9090             p2 = lock_user_string(arg4);
9091             if (!p || !p2) {
9092                 ret = -TARGET_EFAULT;
9093             } else {
9094                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9095             }
9096             unlock_user(p2, arg4, 0);
9097             unlock_user(p, arg2, 0);
9098         }
9099         return ret;
9100 #endif
9101 #ifdef TARGET_NR_mkdir
9102     case TARGET_NR_mkdir:
9103         if (!(p = lock_user_string(arg1)))
9104             return -TARGET_EFAULT;
9105         ret = get_errno(mkdir(p, arg2));
9106         unlock_user(p, arg1, 0);
9107         return ret;
9108 #endif
9109 #if defined(TARGET_NR_mkdirat)
9110     case TARGET_NR_mkdirat:
9111         if (!(p = lock_user_string(arg2)))
9112             return -TARGET_EFAULT;
9113         ret = get_errno(mkdirat(arg1, p, arg3));
9114         unlock_user(p, arg2, 0);
9115         return ret;
9116 #endif
9117 #ifdef TARGET_NR_rmdir
9118     case TARGET_NR_rmdir:
9119         if (!(p = lock_user_string(arg1)))
9120             return -TARGET_EFAULT;
9121         ret = get_errno(rmdir(p));
9122         unlock_user(p, arg1, 0);
9123         return ret;
9124 #endif
9125     case TARGET_NR_dup:
9126         ret = get_errno(dup(arg1));
9127         if (ret >= 0) {
9128             fd_trans_dup(arg1, ret);
9129         }
9130         return ret;
9131 #ifdef TARGET_NR_pipe
9132     case TARGET_NR_pipe:
9133         return do_pipe(cpu_env, arg1, 0, 0);
9134 #endif
9135 #ifdef TARGET_NR_pipe2
9136     case TARGET_NR_pipe2:
9137         return do_pipe(cpu_env, arg1,
9138                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9139 #endif
9140     case TARGET_NR_times:
9141         {
9142             struct target_tms *tmsp;
9143             struct tms tms;
9144             ret = get_errno(times(&tms));
9145             if (arg1) {
9146                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9147                 if (!tmsp)
9148                     return -TARGET_EFAULT;
9149                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9150                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9151                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9152                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9153             }
9154             if (!is_error(ret))
9155                 ret = host_to_target_clock_t(ret);
9156         }
9157         return ret;
9158     case TARGET_NR_acct:
9159         if (arg1 == 0) {
9160             ret = get_errno(acct(NULL));
9161         } else {
9162             if (!(p = lock_user_string(arg1))) {
9163                 return -TARGET_EFAULT;
9164             }
9165             ret = get_errno(acct(path(p)));
9166             unlock_user(p, arg1, 0);
9167         }
9168         return ret;
9169 #ifdef TARGET_NR_umount2
9170     case TARGET_NR_umount2:
9171         if (!(p = lock_user_string(arg1)))
9172             return -TARGET_EFAULT;
9173         ret = get_errno(umount2(p, arg2));
9174         unlock_user(p, arg1, 0);
9175         return ret;
9176 #endif
9177     case TARGET_NR_ioctl:
9178         return do_ioctl(arg1, arg2, arg3);
9179 #ifdef TARGET_NR_fcntl
9180     case TARGET_NR_fcntl:
9181         return do_fcntl(arg1, arg2, arg3);
9182 #endif
9183     case TARGET_NR_setpgid:
9184         return get_errno(setpgid(arg1, arg2));
9185     case TARGET_NR_umask:
9186         return get_errno(umask(arg1));
9187     case TARGET_NR_chroot:
9188         if (!(p = lock_user_string(arg1)))
9189             return -TARGET_EFAULT;
9190         ret = get_errno(chroot(p));
9191         unlock_user(p, arg1, 0);
9192         return ret;
9193 #ifdef TARGET_NR_dup2
9194     case TARGET_NR_dup2:
9195         ret = get_errno(dup2(arg1, arg2));
9196         if (ret >= 0) {
9197             fd_trans_dup(arg1, arg2);
9198         }
9199         return ret;
9200 #endif
9201 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9202     case TARGET_NR_dup3:
9203     {
9204         int host_flags;
9205 
9206         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9207             return -EINVAL;
9208         }
9209         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9210         ret = get_errno(dup3(arg1, arg2, host_flags));
9211         if (ret >= 0) {
9212             fd_trans_dup(arg1, arg2);
9213         }
9214         return ret;
9215     }
9216 #endif
9217 #ifdef TARGET_NR_getppid /* not on alpha */
9218     case TARGET_NR_getppid:
9219         return get_errno(getppid());
9220 #endif
9221 #ifdef TARGET_NR_getpgrp
9222     case TARGET_NR_getpgrp:
9223         return get_errno(getpgrp());
9224 #endif
9225     case TARGET_NR_setsid:
9226         return get_errno(setsid());
9227 #ifdef TARGET_NR_sigaction
9228     case TARGET_NR_sigaction:
9229         {
9230 #if defined(TARGET_MIPS)
9231             struct target_sigaction act, oact, *pact, *old_act;
9232 
9233             if (arg2) {
9234                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9235                     return -TARGET_EFAULT;
9236                 act._sa_handler = old_act->_sa_handler;
9237                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9238                 act.sa_flags = old_act->sa_flags;
9239                 unlock_user_struct(old_act, arg2, 0);
9240                 pact = &act;
9241             } else {
9242                 pact = NULL;
9243             }
9244 
9245             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9246 
9247             if (!is_error(ret) && arg3) {
9248                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9249                     return -TARGET_EFAULT;
9250                 old_act->_sa_handler = oact._sa_handler;
9251                 old_act->sa_flags = oact.sa_flags;
9252                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9253                 old_act->sa_mask.sig[1] = 0;
9254                 old_act->sa_mask.sig[2] = 0;
9255                 old_act->sa_mask.sig[3] = 0;
9256                 unlock_user_struct(old_act, arg3, 1);
9257             }
9258 #else
9259             struct target_old_sigaction *old_act;
9260             struct target_sigaction act, oact, *pact;
9261             if (arg2) {
9262                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9263                     return -TARGET_EFAULT;
9264                 act._sa_handler = old_act->_sa_handler;
9265                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9266                 act.sa_flags = old_act->sa_flags;
9267 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9268                 act.sa_restorer = old_act->sa_restorer;
9269 #endif
9270                 unlock_user_struct(old_act, arg2, 0);
9271                 pact = &act;
9272             } else {
9273                 pact = NULL;
9274             }
9275             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9276             if (!is_error(ret) && arg3) {
9277                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9278                     return -TARGET_EFAULT;
9279                 old_act->_sa_handler = oact._sa_handler;
9280                 old_act->sa_mask = oact.sa_mask.sig[0];
9281                 old_act->sa_flags = oact.sa_flags;
9282 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9283                 old_act->sa_restorer = oact.sa_restorer;
9284 #endif
9285                 unlock_user_struct(old_act, arg3, 1);
9286             }
9287 #endif
9288         }
9289         return ret;
9290 #endif
9291     case TARGET_NR_rt_sigaction:
9292         {
9293             /*
9294              * For Alpha and SPARC this is a 5 argument syscall, with
9295              * a 'restorer' parameter which must be copied into the
9296              * sa_restorer field of the sigaction struct.
9297              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9298              * and arg5 is the sigsetsize.
9299              */
9300 #if defined(TARGET_ALPHA)
9301             target_ulong sigsetsize = arg4;
9302             target_ulong restorer = arg5;
9303 #elif defined(TARGET_SPARC)
9304             target_ulong restorer = arg4;
9305             target_ulong sigsetsize = arg5;
9306 #else
9307             target_ulong sigsetsize = arg4;
9308             target_ulong restorer = 0;
9309 #endif
9310             struct target_sigaction *act = NULL;
9311             struct target_sigaction *oact = NULL;
9312 
9313             if (sigsetsize != sizeof(target_sigset_t)) {
9314                 return -TARGET_EINVAL;
9315             }
9316             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9317                 return -TARGET_EFAULT;
9318             }
9319             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9320                 ret = -TARGET_EFAULT;
9321             } else {
9322                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9323                 if (oact) {
9324                     unlock_user_struct(oact, arg3, 1);
9325                 }
9326             }
9327             if (act) {
9328                 unlock_user_struct(act, arg2, 0);
9329             }
9330         }
9331         return ret;
9332 #ifdef TARGET_NR_sgetmask /* not on alpha */
9333     case TARGET_NR_sgetmask:
9334         {
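             /* Return the current blocked-signal mask in the old single-word format. */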
9335             sigset_t cur_set;
9336             abi_ulong target_set;
9337             ret = do_sigprocmask(0, NULL, &cur_set);
9338             if (!ret) {
9339                 host_to_target_old_sigset(&target_set, &cur_set);
9340                 ret = target_set;
9341             }
9342         }
9343         return ret;
9344 #endif
9345 #ifdef TARGET_NR_ssetmask /* not on alpha */
9346     case TARGET_NR_ssetmask:
9347         {
9348             sigset_t set, oset;
9349             abi_ulong target_set = arg1;
9350             target_to_host_old_sigset(&set, &target_set);
9351             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9352             if (!ret) {
9353                 host_to_target_old_sigset(&target_set, &oset);
9354                 ret = target_set;
9355             }
9356         }
9357         return ret;
9358 #endif
9359 #ifdef TARGET_NR_sigprocmask
9360     case TARGET_NR_sigprocmask:
9361         {
9362 #if defined(TARGET_ALPHA)
9363             sigset_t set, oldset;
9364             abi_ulong mask;
9365             int how;
9366 
9367             switch (arg1) {
9368             case TARGET_SIG_BLOCK:
9369                 how = SIG_BLOCK;
9370                 break;
9371             case TARGET_SIG_UNBLOCK:
9372                 how = SIG_UNBLOCK;
9373                 break;
9374             case TARGET_SIG_SETMASK:
9375                 how = SIG_SETMASK;
9376                 break;
9377             default:
9378                 return -TARGET_EINVAL;
9379             }
9380             mask = arg2;
9381             target_to_host_old_sigset(&set, &mask);
9382 
9383             ret = do_sigprocmask(how, &set, &oldset);
9384             if (!is_error(ret)) {
9385                 host_to_target_old_sigset(&mask, &oldset);
9386                 ret = mask;
9387                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9388             }
9389 #else
9390             sigset_t set, oldset, *set_ptr;
9391             int how;
9392 
9393             if (arg2) {
9394                 switch (arg1) {
9395                 case TARGET_SIG_BLOCK:
9396                     how = SIG_BLOCK;
9397                     break;
9398                 case TARGET_SIG_UNBLOCK:
9399                     how = SIG_UNBLOCK;
9400                     break;
9401                 case TARGET_SIG_SETMASK:
9402                     how = SIG_SETMASK;
9403                     break;
9404                 default:
9405                     return -TARGET_EINVAL;
9406                 }
9407                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9408                     return -TARGET_EFAULT;
9409                 target_to_host_old_sigset(&set, p);
9410                 unlock_user(p, arg2, 0);
9411                 set_ptr = &set;
9412             } else {
9413                 how = 0;
9414                 set_ptr = NULL;
9415             }
9416             ret = do_sigprocmask(how, set_ptr, &oldset);
9417             if (!is_error(ret) && arg3) {
9418                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9419                     return -TARGET_EFAULT;
9420                 host_to_target_old_sigset(p, &oldset);
9421                 unlock_user(p, arg3, sizeof(target_sigset_t));
9422             }
9423 #endif
9424         }
9425         return ret;
9426 #endif
9427     case TARGET_NR_rt_sigprocmask:
9428         {
9429             int how = arg1;
9430             sigset_t set, oldset, *set_ptr;
9431 
9432             if (arg4 != sizeof(target_sigset_t)) {
9433                 return -TARGET_EINVAL;
9434             }
9435 
9436             if (arg2) {
9437                 switch(how) {
9438                 case TARGET_SIG_BLOCK:
9439                     how = SIG_BLOCK;
9440                     break;
9441                 case TARGET_SIG_UNBLOCK:
9442                     how = SIG_UNBLOCK;
9443                     break;
9444                 case TARGET_SIG_SETMASK:
9445                     how = SIG_SETMASK;
9446                     break;
9447                 default:
9448                     return -TARGET_EINVAL;
9449                 }
9450                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9451                     return -TARGET_EFAULT;
9452                 target_to_host_sigset(&set, p);
9453                 unlock_user(p, arg2, 0);
9454                 set_ptr = &set;
9455             } else {
9456                 how = 0;
9457                 set_ptr = NULL;
9458             }
9459             ret = do_sigprocmask(how, set_ptr, &oldset);
9460             if (!is_error(ret) && arg3) {
9461                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9462                     return -TARGET_EFAULT;
9463                 host_to_target_sigset(p, &oldset);
9464                 unlock_user(p, arg3, sizeof(target_sigset_t));
9465             }
9466         }
9467         return ret;
9468 #ifdef TARGET_NR_sigpending
9469     case TARGET_NR_sigpending:
9470         {
9471             sigset_t set;
9472             ret = get_errno(sigpending(&set));
9473             if (!is_error(ret)) {
9474                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9475                     return -TARGET_EFAULT;
9476                 host_to_target_old_sigset(p, &set);
9477                 unlock_user(p, arg1, sizeof(target_sigset_t));
9478             }
9479         }
9480         return ret;
9481 #endif
9482     case TARGET_NR_rt_sigpending:
9483         {
9484             sigset_t set;
9485 
9486             /* Yes, this check is >, not != like most. We follow the kernel's
9487              * logic and it does it like this because it implements
9488              * NR_sigpending through the same code path, and in that case
9489              * the old_sigset_t is smaller in size.
9490              */
9491             if (arg2 > sizeof(target_sigset_t)) {
9492                 return -TARGET_EINVAL;
9493             }
9494 
9495             ret = get_errno(sigpending(&set));
9496             if (!is_error(ret)) {
9497                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9498                     return -TARGET_EFAULT;
9499                 host_to_target_sigset(p, &set);
9500                 unlock_user(p, arg1, sizeof(target_sigset_t));
9501             }
9502         }
9503         return ret;
9504 #ifdef TARGET_NR_sigsuspend
9505     case TARGET_NR_sigsuspend:
9506         {
9507             TaskState *ts = cpu->opaque;
9508 #if defined(TARGET_ALPHA)
9509             abi_ulong mask = arg1;
9510             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9511 #else
9512             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9513                 return -TARGET_EFAULT;
9514             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9515             unlock_user(p, arg1, 0);
9516 #endif
9517             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9518                                                SIGSET_T_SIZE));
9519             if (ret != -QEMU_ERESTARTSYS) {
9520                 ts->in_sigsuspend = 1;
9521             }
9522         }
9523         return ret;
9524 #endif
9525     case TARGET_NR_rt_sigsuspend:
9526         {
9527             TaskState *ts = cpu->opaque;
9528 
9529             if (arg2 != sizeof(target_sigset_t)) {
9530                 return -TARGET_EINVAL;
9531             }
9532             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9533                 return -TARGET_EFAULT;
9534             target_to_host_sigset(&ts->sigsuspend_mask, p);
9535             unlock_user(p, arg1, 0);
9536             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9537                                                SIGSET_T_SIZE));
9538             if (ret != -QEMU_ERESTARTSYS) {
9539                 ts->in_sigsuspend = 1;
9540             }
9541         }
9542         return ret;
9543 #ifdef TARGET_NR_rt_sigtimedwait
9544     case TARGET_NR_rt_sigtimedwait:
9545         {
9546             sigset_t set;
9547             struct timespec uts, *puts;
9548             siginfo_t uinfo;
9549 
9550             if (arg4 != sizeof(target_sigset_t)) {
9551                 return -TARGET_EINVAL;
9552             }
9553 
9554             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9555                 return -TARGET_EFAULT;
9556             target_to_host_sigset(&set, p);
9557             unlock_user(p, arg1, 0);
9558             if (arg3) {
9559                 puts = &uts;
9560                 if (target_to_host_timespec(puts, arg3)) {
9561                     return -TARGET_EFAULT;
9562                 }
9563             } else {
9564                 puts = NULL;
9565             }
9566             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9567                                                  SIGSET_T_SIZE));
9568             if (!is_error(ret)) {
9569                 if (arg2) {
9570                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9571                                   0);
9572                     if (!p) {
9573                         return -TARGET_EFAULT;
9574                     }
9575                     host_to_target_siginfo(p, &uinfo);
9576                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9577                 }
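                     /* The syscall returns a host signal number; map it back to the target numbering. */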
9578                 ret = host_to_target_signal(ret);
9579             }
9580         }
9581         return ret;
9582 #endif
9583 #ifdef TARGET_NR_rt_sigtimedwait_time64
9584     case TARGET_NR_rt_sigtimedwait_time64:
9585         {
9586             sigset_t set;
9587             struct timespec uts, *puts;
9588             siginfo_t uinfo;
9589 
9590             if (arg4 != sizeof(target_sigset_t)) {
9591                 return -TARGET_EINVAL;
9592             }
9593 
9594             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9595             if (!p) {
9596                 return -TARGET_EFAULT;
9597             }
9598             target_to_host_sigset(&set, p);
9599             unlock_user(p, arg1, 0);
9600             if (arg3) {
9601                 puts = &uts;
9602                 if (target_to_host_timespec64(puts, arg3)) {
9603                     return -TARGET_EFAULT;
9604                 }
9605             } else {
9606                 puts = NULL;
9607             }
9608             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9609                                                  SIGSET_T_SIZE));
9610             if (!is_error(ret)) {
9611                 if (arg2) {
9612                     p = lock_user(VERIFY_WRITE, arg2,
9613                                   sizeof(target_siginfo_t), 0);
9614                     if (!p) {
9615                         return -TARGET_EFAULT;
9616                     }
9617                     host_to_target_siginfo(p, &uinfo);
9618                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9619                 }
9620                 ret = host_to_target_signal(ret);
9621             }
9622         }
9623         return ret;
9624 #endif
9625     case TARGET_NR_rt_sigqueueinfo:
9626         {
9627             siginfo_t uinfo;
9628 
9629             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9630             if (!p) {
9631                 return -TARGET_EFAULT;
9632             }
9633             target_to_host_siginfo(&uinfo, p);
9634             unlock_user(p, arg3, 0);
9635             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9636         }
9637         return ret;
9638     case TARGET_NR_rt_tgsigqueueinfo:
9639         {
9640             siginfo_t uinfo;
9641 
9642             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9643             if (!p) {
9644                 return -TARGET_EFAULT;
9645             }
9646             target_to_host_siginfo(&uinfo, p);
9647             unlock_user(p, arg4, 0);
9648             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9649         }
9650         return ret;
9651 #ifdef TARGET_NR_sigreturn
9652     case TARGET_NR_sigreturn:
9653         if (block_signals()) {
9654             return -QEMU_ERESTARTSYS;
9655         }
9656         return do_sigreturn(cpu_env);
9657 #endif
9658     case TARGET_NR_rt_sigreturn:
9659         if (block_signals()) {
9660             return -QEMU_ERESTARTSYS;
9661         }
9662         return do_rt_sigreturn(cpu_env);
9663     case TARGET_NR_sethostname:
9664         if (!(p = lock_user_string(arg1)))
9665             return -TARGET_EFAULT;
9666         ret = get_errno(sethostname(p, arg2));
9667         unlock_user(p, arg1, 0);
9668         return ret;
9669 #ifdef TARGET_NR_setrlimit
9670     case TARGET_NR_setrlimit:
9671         {
9672             int resource = target_to_host_resource(arg1);
9673             struct target_rlimit *target_rlim;
9674             struct rlimit rlim;
9675             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9676                 return -TARGET_EFAULT;
9677             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9678             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9679             unlock_user_struct(target_rlim, arg2, 0);
9680             /*
9681              * If we just passed through resource limit settings for memory then
9682              * they would also apply to QEMU's own allocations, and QEMU will
9683              * crash or hang or die if its allocations fail. Ideally we would
9684              * track the guest allocations in QEMU and apply the limits ourselves.
9685              * For now, just tell the guest the call succeeded but don't actually
9686              * limit anything.
9687              */
9688             if (resource != RLIMIT_AS &&
9689                 resource != RLIMIT_DATA &&
9690                 resource != RLIMIT_STACK) {
9691                 return get_errno(setrlimit(resource, &rlim));
9692             } else {
9693                 return 0;
9694             }
9695         }
9696 #endif
9697 #ifdef TARGET_NR_getrlimit
9698     case TARGET_NR_getrlimit:
9699         {
9700             int resource = target_to_host_resource(arg1);
9701             struct target_rlimit *target_rlim;
9702             struct rlimit rlim;
9703 
9704             ret = get_errno(getrlimit(resource, &rlim));
9705             if (!is_error(ret)) {
9706                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9707                     return -TARGET_EFAULT;
9708                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9709                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9710                 unlock_user_struct(target_rlim, arg2, 1);
9711             }
9712         }
9713         return ret;
9714 #endif
9715     case TARGET_NR_getrusage:
9716         {
9717             struct rusage rusage;
9718             ret = get_errno(getrusage(arg1, &rusage));
9719             if (!is_error(ret)) {
9720                 ret = host_to_target_rusage(arg2, &rusage);
9721             }
9722         }
9723         return ret;
9724 #if defined(TARGET_NR_gettimeofday)
9725     case TARGET_NR_gettimeofday:
9726         {
9727             struct timeval tv;
9728             struct timezone tz;
9729 
9730             ret = get_errno(gettimeofday(&tv, &tz));
9731             if (!is_error(ret)) {
9732                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9733                     return -TARGET_EFAULT;
9734                 }
9735                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9736                     return -TARGET_EFAULT;
9737                 }
9738             }
9739         }
9740         return ret;
9741 #endif
9742 #if defined(TARGET_NR_settimeofday)
9743     case TARGET_NR_settimeofday:
9744         {
9745             struct timeval tv, *ptv = NULL;
9746             struct timezone tz, *ptz = NULL;
9747 
9748             if (arg1) {
9749                 if (copy_from_user_timeval(&tv, arg1)) {
9750                     return -TARGET_EFAULT;
9751                 }
9752                 ptv = &tv;
9753             }
9754 
9755             if (arg2) {
9756                 if (copy_from_user_timezone(&tz, arg2)) {
9757                     return -TARGET_EFAULT;
9758                 }
9759                 ptz = &tz;
9760             }
9761 
9762             return get_errno(settimeofday(ptv, ptz));
9763         }
9764 #endif
9765 #if defined(TARGET_NR_select)
9766     case TARGET_NR_select:
9767 #if defined(TARGET_WANT_NI_OLD_SELECT)
9768         /* Some architectures used to have old_select here
9769          * but now return ENOSYS for it.
9770          */
9771         ret = -TARGET_ENOSYS;
9772 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9773         ret = do_old_select(arg1);
9774 #else
9775         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9776 #endif
9777         return ret;
9778 #endif
9779 #ifdef TARGET_NR_pselect6
9780     case TARGET_NR_pselect6:
9781         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9782 #endif
9783 #ifdef TARGET_NR_pselect6_time64
9784     case TARGET_NR_pselect6_time64:
9785         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9786 #endif
9787 #ifdef TARGET_NR_symlink
9788     case TARGET_NR_symlink:
9789         {
9790             void *p2;
9791             p = lock_user_string(arg1);
9792             p2 = lock_user_string(arg2);
9793             if (!p || !p2)
9794                 ret = -TARGET_EFAULT;
9795             else
9796                 ret = get_errno(symlink(p, p2));
9797             unlock_user(p2, arg2, 0);
9798             unlock_user(p, arg1, 0);
9799         }
9800         return ret;
9801 #endif
9802 #if defined(TARGET_NR_symlinkat)
9803     case TARGET_NR_symlinkat:
9804         {
9805             void *p2;
9806             p  = lock_user_string(arg1);
9807             p2 = lock_user_string(arg3);
9808             if (!p || !p2)
9809                 ret = -TARGET_EFAULT;
9810             else
9811                 ret = get_errno(symlinkat(p, arg2, p2));
9812             unlock_user(p2, arg3, 0);
9813             unlock_user(p, arg1, 0);
9814         }
9815         return ret;
9816 #endif
9817 #ifdef TARGET_NR_readlink
9818     case TARGET_NR_readlink:
9819         {
9820             void *p2;
9821             p = lock_user_string(arg1);
9822             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9823             if (!p || !p2) {
9824                 ret = -TARGET_EFAULT;
9825             } else if (!arg3) {
9826                 /* Short circuit this for the magic exe check. */
9827                 ret = -TARGET_EINVAL;
9828             } else if (is_proc_myself((const char *)p, "exe")) {
9829                 char real[PATH_MAX], *temp;
9830                 temp = realpath(exec_path, real);
9831                 /* Return value is # of bytes that we wrote to the buffer. */
9832                 if (temp == NULL) {
9833                     ret = get_errno(-1);
9834                 } else {
9835                     /* Don't worry about sign mismatch as earlier mapping
9836                      * logic would have thrown a bad address error. */
9837                     ret = MIN(strlen(real), arg3);
9838                     /* We cannot NUL terminate the string. */
9839                     memcpy(p2, real, ret);
9840                 }
9841             } else {
9842                 ret = get_errno(readlink(path(p), p2, arg3));
9843             }
9844             unlock_user(p2, arg2, ret);
9845             unlock_user(p, arg1, 0);
9846         }
9847         return ret;
9848 #endif
9849 #if defined(TARGET_NR_readlinkat)
9850     case TARGET_NR_readlinkat:
9851         {
9852             void *p2;
9853             p  = lock_user_string(arg2);
9854             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9855             if (!p || !p2) {
9856                 ret = -TARGET_EFAULT;
9857             } else if (is_proc_myself((const char *)p, "exe")) {
9858                 char real[PATH_MAX], *temp;
9859                 temp = realpath(exec_path, real);
9860             ret = temp == NULL ? get_errno(-1) : strlen(real);
9861                 snprintf((char *)p2, arg4, "%s", real);
9862             } else {
9863                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9864             }
9865             unlock_user(p2, arg3, ret);
9866             unlock_user(p, arg2, 0);
9867         }
9868         return ret;
9869 #endif
9870 #ifdef TARGET_NR_swapon
9871     case TARGET_NR_swapon:
9872         if (!(p = lock_user_string(arg1)))
9873             return -TARGET_EFAULT;
9874         ret = get_errno(swapon(p, arg2));
9875         unlock_user(p, arg1, 0);
9876         return ret;
9877 #endif
9878     case TARGET_NR_reboot:
9879         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9880             /* arg4 is a command string used only for RESTART2; it is ignored in all other cases */
9881             p = lock_user_string(arg4);
9882             if (!p) {
9883                 return -TARGET_EFAULT;
9884             }
9885             ret = get_errno(reboot(arg1, arg2, arg3, p));
9886             unlock_user(p, arg4, 0);
9887         } else {
9888             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9889         }
9890         return ret;
9891 #ifdef TARGET_NR_mmap
9892     case TARGET_NR_mmap:
9893 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9894     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9895     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9896     || defined(TARGET_S390X)
9897         {
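                 /* These ABIs pass old-style mmap() a single guest pointer to a block of six arguments. */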
9898             abi_ulong *v;
9899             abi_ulong v1, v2, v3, v4, v5, v6;
9900             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9901                 return -TARGET_EFAULT;
9902             v1 = tswapal(v[0]);
9903             v2 = tswapal(v[1]);
9904             v3 = tswapal(v[2]);
9905             v4 = tswapal(v[3]);
9906             v5 = tswapal(v[4]);
9907             v6 = tswapal(v[5]);
9908             unlock_user(v, arg1, 0);
9909             ret = get_errno(target_mmap(v1, v2, v3,
9910                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9911                                         v5, v6));
9912         }
9913 #else
9914         /* mmap pointers are always untagged */
9915         ret = get_errno(target_mmap(arg1, arg2, arg3,
9916                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9917                                     arg5,
9918                                     arg6));
9919 #endif
9920         return ret;
9921 #endif
9922 #ifdef TARGET_NR_mmap2
9923     case TARGET_NR_mmap2:
9924 #ifndef MMAP_SHIFT
9925 #define MMAP_SHIFT 12
9926 #endif
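             /* The mmap2 offset argument is in units of (1 << MMAP_SHIFT)-byte pages, not bytes. */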
9927         ret = target_mmap(arg1, arg2, arg3,
9928                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9929                           arg5, arg6 << MMAP_SHIFT);
9930         return get_errno(ret);
9931 #endif
9932     case TARGET_NR_munmap:
9933         arg1 = cpu_untagged_addr(cpu, arg1);
9934         return get_errno(target_munmap(arg1, arg2));
9935     case TARGET_NR_mprotect:
9936         arg1 = cpu_untagged_addr(cpu, arg1);
9937         {
9938             TaskState *ts = cpu->opaque;
9939             /* Special hack to detect libc making the stack executable.  */
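                 /* Drop PROT_GROWSDOWN and extend the range down to the recorded stack limit by hand. */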
9940             if ((arg3 & PROT_GROWSDOWN)
9941                 && arg1 >= ts->info->stack_limit
9942                 && arg1 <= ts->info->start_stack) {
9943                 arg3 &= ~PROT_GROWSDOWN;
9944                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9945                 arg1 = ts->info->stack_limit;
9946             }
9947         }
9948         return get_errno(target_mprotect(arg1, arg2, arg3));
9949 #ifdef TARGET_NR_mremap
9950     case TARGET_NR_mremap:
9951         arg1 = cpu_untagged_addr(cpu, arg1);
9952         /* mremap new_addr (arg5) is always untagged */
9953         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9954 #endif
9955         /* ??? msync/mlock/munlock are broken for softmmu.  */
9956 #ifdef TARGET_NR_msync
9957     case TARGET_NR_msync:
9958         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9959 #endif
9960 #ifdef TARGET_NR_mlock
9961     case TARGET_NR_mlock:
9962         return get_errno(mlock(g2h(cpu, arg1), arg2));
9963 #endif
9964 #ifdef TARGET_NR_munlock
9965     case TARGET_NR_munlock:
9966         return get_errno(munlock(g2h(cpu, arg1), arg2));
9967 #endif
9968 #ifdef TARGET_NR_mlockall
9969     case TARGET_NR_mlockall:
9970         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9971 #endif
9972 #ifdef TARGET_NR_munlockall
9973     case TARGET_NR_munlockall:
9974         return get_errno(munlockall());
9975 #endif
9976 #ifdef TARGET_NR_truncate
9977     case TARGET_NR_truncate:
9978         if (!(p = lock_user_string(arg1)))
9979             return -TARGET_EFAULT;
9980         ret = get_errno(truncate(p, arg2));
9981         unlock_user(p, arg1, 0);
9982         return ret;
9983 #endif
9984 #ifdef TARGET_NR_ftruncate
9985     case TARGET_NR_ftruncate:
9986         return get_errno(ftruncate(arg1, arg2));
9987 #endif
9988     case TARGET_NR_fchmod:
9989         return get_errno(fchmod(arg1, arg2));
9990 #if defined(TARGET_NR_fchmodat)
9991     case TARGET_NR_fchmodat:
9992         if (!(p = lock_user_string(arg2)))
9993             return -TARGET_EFAULT;
9994         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9995         unlock_user(p, arg2, 0);
9996         return ret;
9997 #endif
9998     case TARGET_NR_getpriority:
9999         /* Note that negative values are valid for getpriority, so we must
10000            differentiate based on errno settings.  */
10001         errno = 0;
10002         ret = getpriority(arg1, arg2);
10003         if (ret == -1 && errno != 0) {
10004             return -host_to_target_errno(errno);
10005         }
10006 #ifdef TARGET_ALPHA
10007         /* Return value is the unbiased priority.  Signal no error.  */
10008         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10009 #else
10010         /* Return value is the biased priority (20 - prio) to avoid negative numbers.  */
10011         ret = 20 - ret;
10012 #endif
10013         return ret;
10014     case TARGET_NR_setpriority:
10015         return get_errno(setpriority(arg1, arg2, arg3));
10016 #ifdef TARGET_NR_statfs
10017     case TARGET_NR_statfs:
10018         if (!(p = lock_user_string(arg1))) {
10019             return -TARGET_EFAULT;
10020         }
10021         ret = get_errno(statfs(path(p), &stfs));
10022         unlock_user(p, arg1, 0);
10023     convert_statfs:
10024         if (!is_error(ret)) {
10025             struct target_statfs *target_stfs;
10026 
10027             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10028                 return -TARGET_EFAULT;
10029             __put_user(stfs.f_type, &target_stfs->f_type);
10030             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10031             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10032             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10033             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10034             __put_user(stfs.f_files, &target_stfs->f_files);
10035             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10036             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10037             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10038             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10039             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10040 #ifdef _STATFS_F_FLAGS
10041             __put_user(stfs.f_flags, &target_stfs->f_flags);
10042 #else
10043             __put_user(0, &target_stfs->f_flags);
10044 #endif
10045             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10046             unlock_user_struct(target_stfs, arg2, 1);
10047         }
10048         return ret;
10049 #endif
10050 #ifdef TARGET_NR_fstatfs
10051     case TARGET_NR_fstatfs:
10052         ret = get_errno(fstatfs(arg1, &stfs));
10053         goto convert_statfs;
10054 #endif
10055 #ifdef TARGET_NR_statfs64
10056     case TARGET_NR_statfs64:
10057         if (!(p = lock_user_string(arg1))) {
10058             return -TARGET_EFAULT;
10059         }
10060         ret = get_errno(statfs(path(p), &stfs));
10061         unlock_user(p, arg1, 0);
10062     convert_statfs64:
10063         if (!is_error(ret)) {
10064             struct target_statfs64 *target_stfs;
10065 
10066             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10067                 return -TARGET_EFAULT;
10068             __put_user(stfs.f_type, &target_stfs->f_type);
10069             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10070             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10071             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10072             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10073             __put_user(stfs.f_files, &target_stfs->f_files);
10074             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10075             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10076             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10077             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10078             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10079 #ifdef _STATFS_F_FLAGS
10080             __put_user(stfs.f_flags, &target_stfs->f_flags);
10081 #else
10082             __put_user(0, &target_stfs->f_flags);
10083 #endif
10084             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10085             unlock_user_struct(target_stfs, arg3, 1);
10086         }
10087         return ret;
10088     case TARGET_NR_fstatfs64:
10089         ret = get_errno(fstatfs(arg1, &stfs));
10090         goto convert_statfs64;
10091 #endif
10092 #ifdef TARGET_NR_socketcall
10093     case TARGET_NR_socketcall:
10094         return do_socketcall(arg1, arg2);
10095 #endif
10096 #ifdef TARGET_NR_accept
10097     case TARGET_NR_accept:
10098         return do_accept4(arg1, arg2, arg3, 0);
10099 #endif
10100 #ifdef TARGET_NR_accept4
10101     case TARGET_NR_accept4:
10102         return do_accept4(arg1, arg2, arg3, arg4);
10103 #endif
10104 #ifdef TARGET_NR_bind
10105     case TARGET_NR_bind:
10106         return do_bind(arg1, arg2, arg3);
10107 #endif
10108 #ifdef TARGET_NR_connect
10109     case TARGET_NR_connect:
10110         return do_connect(arg1, arg2, arg3);
10111 #endif
10112 #ifdef TARGET_NR_getpeername
10113     case TARGET_NR_getpeername:
10114         return do_getpeername(arg1, arg2, arg3);
10115 #endif
10116 #ifdef TARGET_NR_getsockname
10117     case TARGET_NR_getsockname:
10118         return do_getsockname(arg1, arg2, arg3);
10119 #endif
10120 #ifdef TARGET_NR_getsockopt
10121     case TARGET_NR_getsockopt:
10122         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10123 #endif
10124 #ifdef TARGET_NR_listen
10125     case TARGET_NR_listen:
10126         return get_errno(listen(arg1, arg2));
10127 #endif
10128 #ifdef TARGET_NR_recv
10129     case TARGET_NR_recv:
10130         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10131 #endif
10132 #ifdef TARGET_NR_recvfrom
10133     case TARGET_NR_recvfrom:
10134         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10135 #endif
10136 #ifdef TARGET_NR_recvmsg
10137     case TARGET_NR_recvmsg:
10138         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10139 #endif
10140 #ifdef TARGET_NR_send
10141     case TARGET_NR_send:
10142         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10143 #endif
10144 #ifdef TARGET_NR_sendmsg
10145     case TARGET_NR_sendmsg:
10146         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10147 #endif
10148 #ifdef TARGET_NR_sendmmsg
10149     case TARGET_NR_sendmmsg:
10150         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10151 #endif
10152 #ifdef TARGET_NR_recvmmsg
10153     case TARGET_NR_recvmmsg:
10154         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10155 #endif
10156 #ifdef TARGET_NR_sendto
10157     case TARGET_NR_sendto:
10158         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10159 #endif
10160 #ifdef TARGET_NR_shutdown
10161     case TARGET_NR_shutdown:
10162         return get_errno(shutdown(arg1, arg2));
10163 #endif
10164 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10165     case TARGET_NR_getrandom:
10166         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10167         if (!p) {
10168             return -TARGET_EFAULT;
10169         }
10170         ret = get_errno(getrandom(p, arg2, arg3));
10171         unlock_user(p, arg1, ret);
10172         return ret;
10173 #endif
10174 #ifdef TARGET_NR_socket
10175     case TARGET_NR_socket:
10176         return do_socket(arg1, arg2, arg3);
10177 #endif
10178 #ifdef TARGET_NR_socketpair
10179     case TARGET_NR_socketpair:
10180         return do_socketpair(arg1, arg2, arg3, arg4);
10181 #endif
10182 #ifdef TARGET_NR_setsockopt
10183     case TARGET_NR_setsockopt:
10184         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10185 #endif
10186 #if defined(TARGET_NR_syslog)
10187     case TARGET_NR_syslog:
10188         {
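                  /* arg1 is the SYSLOG_ACTION_* type, arg2 the user buffer, arg3 its length. */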
10189             int len = arg3;
10190 
10191             switch (arg1) {
10192             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10193             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10194             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10195             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10196             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10197             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10198             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10199             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10200                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10201             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10202             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10203             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10204                 {
10205                     if (len < 0) {
10206                         return -TARGET_EINVAL;
10207                     }
10208                     if (len == 0) {
10209                         return 0;
10210                     }
10211                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10212                     if (!p) {
10213                         return -TARGET_EFAULT;
10214                     }
10215                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10216                     unlock_user(p, arg2, arg3);
10217                 }
10218                 return ret;
10219             default:
10220                 return -TARGET_EINVAL;
10221             }
10222         }
10223         break;
10224 #endif
10225     case TARGET_NR_setitimer:
10226         {
10227             struct itimerval value, ovalue, *pvalue;
10228 
10229             if (arg2) {
10230                 pvalue = &value;
10231                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10232                     || copy_from_user_timeval(&pvalue->it_value,
10233                                               arg2 + sizeof(struct target_timeval)))
10234                     return -TARGET_EFAULT;
10235             } else {
10236                 pvalue = NULL;
10237             }
10238             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10239             if (!is_error(ret) && arg3) {
10240                 if (copy_to_user_timeval(arg3,
10241                                          &ovalue.it_interval)
10242                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10243                                             &ovalue.it_value))
10244                     return -TARGET_EFAULT;
10245             }
10246         }
10247         return ret;
10248     case TARGET_NR_getitimer:
10249         {
10250             struct itimerval value;
10251 
10252             ret = get_errno(getitimer(arg1, &value));
10253             if (!is_error(ret) && arg2) {
10254                 if (copy_to_user_timeval(arg2,
10255                                          &value.it_interval)
10256                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10257                                             &value.it_value))
10258                     return -TARGET_EFAULT;
10259             }
10260         }
10261         return ret;
10262 #ifdef TARGET_NR_stat
10263     case TARGET_NR_stat:
10264         if (!(p = lock_user_string(arg1))) {
10265             return -TARGET_EFAULT;
10266         }
10267         ret = get_errno(stat(path(p), &st));
10268         unlock_user(p, arg1, 0);
10269         goto do_stat;
10270 #endif
10271 #ifdef TARGET_NR_lstat
10272     case TARGET_NR_lstat:
10273         if (!(p = lock_user_string(arg1))) {
10274             return -TARGET_EFAULT;
10275         }
10276         ret = get_errno(lstat(path(p), &st));
10277         unlock_user(p, arg1, 0);
10278         goto do_stat;
10279 #endif
10280 #ifdef TARGET_NR_fstat
10281     case TARGET_NR_fstat:
10282         {
10283             ret = get_errno(fstat(arg1, &st));
10284 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10285         do_stat:
10286 #endif
10287             if (!is_error(ret)) {
10288                 struct target_stat *target_st;
10289 
10290                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10291                     return -TARGET_EFAULT;
10292                 memset(target_st, 0, sizeof(*target_st));
10293                 __put_user(st.st_dev, &target_st->st_dev);
10294                 __put_user(st.st_ino, &target_st->st_ino);
10295                 __put_user(st.st_mode, &target_st->st_mode);
10296                 __put_user(st.st_uid, &target_st->st_uid);
10297                 __put_user(st.st_gid, &target_st->st_gid);
10298                 __put_user(st.st_nlink, &target_st->st_nlink);
10299                 __put_user(st.st_rdev, &target_st->st_rdev);
10300                 __put_user(st.st_size, &target_st->st_size);
10301                 __put_user(st.st_blksize, &target_st->st_blksize);
10302                 __put_user(st.st_blocks, &target_st->st_blocks);
10303                 __put_user(st.st_atime, &target_st->target_st_atime);
10304                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10305                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10306 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10307                 __put_user(st.st_atim.tv_nsec,
10308                            &target_st->target_st_atime_nsec);
10309                 __put_user(st.st_mtim.tv_nsec,
10310                            &target_st->target_st_mtime_nsec);
10311                 __put_user(st.st_ctim.tv_nsec,
10312                            &target_st->target_st_ctime_nsec);
10313 #endif
10314                 unlock_user_struct(target_st, arg2, 1);
10315             }
10316         }
10317         return ret;
10318 #endif
10319     case TARGET_NR_vhangup:
10320         return get_errno(vhangup());
10321 #ifdef TARGET_NR_syscall
10322     case TARGET_NR_syscall:
10323         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10324                           arg6, arg7, arg8, 0);
10325 #endif
10326 #if defined(TARGET_NR_wait4)
10327     case TARGET_NR_wait4:
10328         {
10329             int status;
10330             abi_long status_ptr = arg2;
10331             struct rusage rusage, *rusage_ptr;
10332             abi_ulong target_rusage = arg4;
10333             abi_long rusage_err;
10334             if (target_rusage)
10335                 rusage_ptr = &rusage;
10336             else
10337                 rusage_ptr = NULL;
10338             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10339             if (!is_error(ret)) {
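                      /* Only copy back a status when a child was actually reaped (ret != 0). */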
10340                 if (status_ptr && ret) {
10341                     status = host_to_target_waitstatus(status);
10342                     if (put_user_s32(status, status_ptr))
10343                         return -TARGET_EFAULT;
10344                 }
10345                 if (target_rusage) {
10346                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10347                     if (rusage_err) {
10348                         ret = rusage_err;
10349                     }
10350                 }
10351             }
10352         }
10353         return ret;
10354 #endif
10355 #ifdef TARGET_NR_swapoff
10356     case TARGET_NR_swapoff:
10357         if (!(p = lock_user_string(arg1)))
10358             return -TARGET_EFAULT;
10359         ret = get_errno(swapoff(p));
10360         unlock_user(p, arg1, 0);
10361         return ret;
10362 #endif
10363     case TARGET_NR_sysinfo:
10364         {
10365             struct target_sysinfo *target_value;
10366             struct sysinfo value;
10367             ret = get_errno(sysinfo(&value));
10368             if (!is_error(ret) && arg1)
10369             {
10370                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10371                     return -TARGET_EFAULT;
10372                 __put_user(value.uptime, &target_value->uptime);
10373                 __put_user(value.loads[0], &target_value->loads[0]);
10374                 __put_user(value.loads[1], &target_value->loads[1]);
10375                 __put_user(value.loads[2], &target_value->loads[2]);
10376                 __put_user(value.totalram, &target_value->totalram);
10377                 __put_user(value.freeram, &target_value->freeram);
10378                 __put_user(value.sharedram, &target_value->sharedram);
10379                 __put_user(value.bufferram, &target_value->bufferram);
10380                 __put_user(value.totalswap, &target_value->totalswap);
10381                 __put_user(value.freeswap, &target_value->freeswap);
10382                 __put_user(value.procs, &target_value->procs);
10383                 __put_user(value.totalhigh, &target_value->totalhigh);
10384                 __put_user(value.freehigh, &target_value->freehigh);
10385                 __put_user(value.mem_unit, &target_value->mem_unit);
10386                 unlock_user_struct(target_value, arg1, 1);
10387             }
10388         }
10389         return ret;
10390 #ifdef TARGET_NR_ipc
10391     case TARGET_NR_ipc:
10392         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10393 #endif
10394 #ifdef TARGET_NR_semget
10395     case TARGET_NR_semget:
10396         return get_errno(semget(arg1, arg2, arg3));
10397 #endif
10398 #ifdef TARGET_NR_semop
10399     case TARGET_NR_semop:
10400         return do_semtimedop(arg1, arg2, arg3, 0, false);
10401 #endif
10402 #ifdef TARGET_NR_semtimedop
10403     case TARGET_NR_semtimedop:
10404         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10405 #endif
10406 #ifdef TARGET_NR_semtimedop_time64
10407     case TARGET_NR_semtimedop_time64:
10408         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10409 #endif
10410 #ifdef TARGET_NR_semctl
10411     case TARGET_NR_semctl:
10412         return do_semctl(arg1, arg2, arg3, arg4);
10413 #endif
10414 #ifdef TARGET_NR_msgctl
10415     case TARGET_NR_msgctl:
10416         return do_msgctl(arg1, arg2, arg3);
10417 #endif
10418 #ifdef TARGET_NR_msgget
10419     case TARGET_NR_msgget:
10420         return get_errno(msgget(arg1, arg2));
10421 #endif
10422 #ifdef TARGET_NR_msgrcv
10423     case TARGET_NR_msgrcv:
10424         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10425 #endif
10426 #ifdef TARGET_NR_msgsnd
10427     case TARGET_NR_msgsnd:
10428         return do_msgsnd(arg1, arg2, arg3, arg4);
10429 #endif
10430 #ifdef TARGET_NR_shmget
10431     case TARGET_NR_shmget:
10432         return get_errno(shmget(arg1, arg2, arg3));
10433 #endif
10434 #ifdef TARGET_NR_shmctl
10435     case TARGET_NR_shmctl:
10436         return do_shmctl(arg1, arg2, arg3);
10437 #endif
10438 #ifdef TARGET_NR_shmat
10439     case TARGET_NR_shmat:
10440         return do_shmat(cpu_env, arg1, arg2, arg3);
10441 #endif
10442 #ifdef TARGET_NR_shmdt
10443     case TARGET_NR_shmdt:
10444         return do_shmdt(arg1);
10445 #endif
10446     case TARGET_NR_fsync:
10447         return get_errno(fsync(arg1));
10448     case TARGET_NR_clone:
10449         /* Linux manages to have three different orderings for its
10450          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10451          * match the kernel's CONFIG_CLONE_* settings.
10452          * Microblaze is further special in that it uses a sixth
10453          * implicit argument to clone for the TLS pointer.
10454          */
10455 #if defined(TARGET_MICROBLAZE)
10456         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10457 #elif defined(TARGET_CLONE_BACKWARDS)
10458         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10459 #elif defined(TARGET_CLONE_BACKWARDS2)
10460         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10461 #else
10462         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10463 #endif
10464         return ret;
10465 #ifdef __NR_exit_group
10466         /* new thread calls */
10467     case TARGET_NR_exit_group:
10468         preexit_cleanup(cpu_env, arg1);
10469         return get_errno(exit_group(arg1));
10470 #endif
10471     case TARGET_NR_setdomainname:
10472         if (!(p = lock_user_string(arg1)))
10473             return -TARGET_EFAULT;
10474         ret = get_errno(setdomainname(p, arg2));
10475         unlock_user(p, arg1, 0);
10476         return ret;
10477     case TARGET_NR_uname:
10478         /* No need to transcode because we use the Linux syscall. */
10479         {
10480             struct new_utsname * buf;
10481 
10482             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10483                 return -TARGET_EFAULT;
10484             ret = get_errno(sys_uname(buf));
10485             if (!is_error(ret)) {
10486                 /* Overwrite the native machine name with whatever is being
10487                    emulated. */
10488                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10489                           sizeof(buf->machine));
10490                 /* Allow the user to override the reported release.  */
10491                 if (qemu_uname_release && *qemu_uname_release) {
10492                     g_strlcpy(buf->release, qemu_uname_release,
10493                               sizeof(buf->release));
10494                 }
10495             }
10496             unlock_user_struct(buf, arg1, 1);
10497         }
10498         return ret;
10499 #ifdef TARGET_I386
10500     case TARGET_NR_modify_ldt:
10501         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10502 #if !defined(TARGET_X86_64)
10503     case TARGET_NR_vm86:
10504         return do_vm86(cpu_env, arg1, arg2);
10505 #endif
10506 #endif
10507 #if defined(TARGET_NR_adjtimex)
10508     case TARGET_NR_adjtimex:
10509         {
10510             struct timex host_buf;
10511 
10512             if (target_to_host_timex(&host_buf, arg1) != 0) {
10513                 return -TARGET_EFAULT;
10514             }
10515             ret = get_errno(adjtimex(&host_buf));
10516             if (!is_error(ret)) {
10517                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10518                     return -TARGET_EFAULT;
10519                 }
10520             }
10521         }
10522         return ret;
10523 #endif
10524 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10525     case TARGET_NR_clock_adjtime:
10526         {
10527             struct timex htx, *phtx = &htx;
10528 
10529             if (target_to_host_timex(phtx, arg2) != 0) {
10530                 return -TARGET_EFAULT;
10531             }
10532             ret = get_errno(clock_adjtime(arg1, phtx));
10533             if (!is_error(ret) && phtx) {
10534                 if (host_to_target_timex(arg2, phtx) != 0) {
10535                     return -TARGET_EFAULT;
10536                 }
10537             }
10538         }
10539         return ret;
10540 #endif
10541 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10542     case TARGET_NR_clock_adjtime64:
10543         {
10544             struct timex htx;
10545 
10546             if (target_to_host_timex64(&htx, arg2) != 0) {
10547                 return -TARGET_EFAULT;
10548             }
10549             ret = get_errno(clock_adjtime(arg1, &htx));
10550             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10551                     return -TARGET_EFAULT;
10552             }
10553         }
10554         return ret;
10555 #endif
10556     case TARGET_NR_getpgid:
10557         return get_errno(getpgid(arg1));
10558     case TARGET_NR_fchdir:
10559         return get_errno(fchdir(arg1));
10560     case TARGET_NR_personality:
10561         return get_errno(personality(arg1));
10562 #ifdef TARGET_NR__llseek /* Not on alpha */
10563     case TARGET_NR__llseek:
10564         {
10565             int64_t res;
10566 #if !defined(__NR_llseek)
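                  /* No llseek on the host: build the 64-bit offset from the high (arg2) and low (arg3) halves and use lseek. */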
10567             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10568             if (res == -1) {
10569                 ret = get_errno(res);
10570             } else {
10571                 ret = 0;
10572             }
10573 #else
10574             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10575 #endif
10576             if ((ret == 0) && put_user_s64(res, arg4)) {
10577                 return -TARGET_EFAULT;
10578             }
10579         }
10580         return ret;
10581 #endif
10582 #ifdef TARGET_NR_getdents
10583     case TARGET_NR_getdents:
10584         return do_getdents(arg1, arg2, arg3);
10585 #endif /* TARGET_NR_getdents */
10586 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10587     case TARGET_NR_getdents64:
10588         return do_getdents64(arg1, arg2, arg3);
10589 #endif /* TARGET_NR_getdents64 */
10590 #if defined(TARGET_NR__newselect)
10591     case TARGET_NR__newselect:
10592         return do_select(arg1, arg2, arg3, arg4, arg5);
10593 #endif
10594 #ifdef TARGET_NR_poll
10595     case TARGET_NR_poll:
10596         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10597 #endif
10598 #ifdef TARGET_NR_ppoll
10599     case TARGET_NR_ppoll:
10600         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10601 #endif
10602 #ifdef TARGET_NR_ppoll_time64
10603     case TARGET_NR_ppoll_time64:
10604         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10605 #endif
10606     case TARGET_NR_flock:
10607         /* NOTE: the flock constant seems to be the same for every
10608            Linux platform */
10609         return get_errno(safe_flock(arg1, arg2));
10610     case TARGET_NR_readv:
10611         {
10612             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10613             if (vec != NULL) {
10614                 ret = get_errno(safe_readv(arg1, vec, arg3));
10615                 unlock_iovec(vec, arg2, arg3, 1);
10616             } else {
10617                 ret = -host_to_target_errno(errno);
10618             }
10619         }
10620         return ret;
10621     case TARGET_NR_writev:
10622         {
10623             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10624             if (vec != NULL) {
10625                 ret = get_errno(safe_writev(arg1, vec, arg3));
10626                 unlock_iovec(vec, arg2, arg3, 0);
10627             } else {
10628                 ret = -host_to_target_errno(errno);
10629             }
10630         }
10631         return ret;
10632 #if defined(TARGET_NR_preadv)
10633     case TARGET_NR_preadv:
10634         {
10635             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10636             if (vec != NULL) {
10637                 unsigned long low, high;
10638 
10639                 target_to_host_low_high(arg4, arg5, &low, &high);
10640                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10641                 unlock_iovec(vec, arg2, arg3, 1);
10642             } else {
10643                 ret = -host_to_target_errno(errno);
10644             }
10645         }
10646         return ret;
10647 #endif
10648 #if defined(TARGET_NR_pwritev)
10649     case TARGET_NR_pwritev:
10650         {
10651             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10652             if (vec != NULL) {
10653                 unsigned long low, high;
10654 
10655                 target_to_host_low_high(arg4, arg5, &low, &high);
10656                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10657                 unlock_iovec(vec, arg2, arg3, 0);
10658             } else {
10659                 ret = -host_to_target_errno(errno);
10660             }
10661         }
10662         return ret;
10663 #endif
10664     case TARGET_NR_getsid:
10665         return get_errno(getsid(arg1));
10666 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10667     case TARGET_NR_fdatasync:
10668         return get_errno(fdatasync(arg1));
10669 #endif
10670     case TARGET_NR_sched_getaffinity:
10671         {
10672             unsigned int mask_size;
10673             unsigned long *mask;
10674 
10675             /*
10676              * sched_getaffinity needs the mask size to be a multiple of ulong,
10677              * so take care of mismatches between target and host ulong sizes.
10678              */
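                  /*
                   * For example, a 32-bit guest passing arg2 == 4 gets
                   * mask_size rounded up to 8 on a host with 8-byte longs.
                   */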
10679             if (arg2 & (sizeof(abi_ulong) - 1)) {
10680                 return -TARGET_EINVAL;
10681             }
10682             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10683 
10684             mask = alloca(mask_size);
10685             memset(mask, 0, mask_size);
10686             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10687 
10688             if (!is_error(ret)) {
10689                 if (ret > arg2) {
10690                     /* More data returned than will fit in the caller's buffer.
10691                      * This only happens if sizeof(abi_long) < sizeof(long)
10692                      * and the caller passed us a buffer holding an odd number
10693                      * of abi_longs. If the host kernel is actually using the
10694                      * extra 4 bytes then fail EINVAL; otherwise we can just
10695                      * ignore them and only copy the interesting part.
10696                      */
10697                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10698                     if (numcpus > arg2 * 8) {
10699                         return -TARGET_EINVAL;
10700                     }
10701                     ret = arg2;
10702                 }
10703 
10704                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10705                     return -TARGET_EFAULT;
10706                 }
10707             }
10708         }
10709         return ret;
10710     case TARGET_NR_sched_setaffinity:
10711         {
10712             unsigned int mask_size;
10713             unsigned long *mask;
10714 
10715             /*
10716              * sched_setaffinity needs the mask size to be a multiple of ulong,
10717              * so take care of mismatches between target and host ulong sizes.
10718              */
10719             if (arg2 & (sizeof(abi_ulong) - 1)) {
10720                 return -TARGET_EINVAL;
10721             }
10722             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10723             mask = alloca(mask_size);
10724 
10725             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10726             if (ret) {
10727                 return ret;
10728             }
10729 
10730             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10731         }
10732     case TARGET_NR_getcpu:
10733         {
10734             unsigned cpu, node;
10735             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10736                                        arg2 ? &node : NULL,
10737                                        NULL));
10738             if (is_error(ret)) {
10739                 return ret;
10740             }
10741             if (arg1 && put_user_u32(cpu, arg1)) {
10742                 return -TARGET_EFAULT;
10743             }
10744             if (arg2 && put_user_u32(node, arg2)) {
10745                 return -TARGET_EFAULT;
10746             }
10747         }
10748         return ret;
10749     case TARGET_NR_sched_setparam:
10750         {
10751             struct sched_param *target_schp;
10752             struct sched_param schp;
10753 
10754             if (arg2 == 0) {
10755                 return -TARGET_EINVAL;
10756             }
10757             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10758                 return -TARGET_EFAULT;
10759             schp.sched_priority = tswap32(target_schp->sched_priority);
10760             unlock_user_struct(target_schp, arg2, 0);
10761             return get_errno(sched_setparam(arg1, &schp));
10762         }
10763     case TARGET_NR_sched_getparam:
10764         {
10765             struct sched_param *target_schp;
10766             struct sched_param schp;
10767 
10768             if (arg2 == 0) {
10769                 return -TARGET_EINVAL;
10770             }
10771             ret = get_errno(sched_getparam(arg1, &schp));
10772             if (!is_error(ret)) {
10773                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10774                     return -TARGET_EFAULT;
10775                 target_schp->sched_priority = tswap32(schp.sched_priority);
10776                 unlock_user_struct(target_schp, arg2, 1);
10777             }
10778         }
10779         return ret;
10780     case TARGET_NR_sched_setscheduler:
10781         {
10782             struct sched_param *target_schp;
10783             struct sched_param schp;
10784             if (arg3 == 0) {
10785                 return -TARGET_EINVAL;
10786             }
10787             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10788                 return -TARGET_EFAULT;
10789             schp.sched_priority = tswap32(target_schp->sched_priority);
10790             unlock_user_struct(target_schp, arg3, 0);
10791             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10792         }
10793     case TARGET_NR_sched_getscheduler:
10794         return get_errno(sched_getscheduler(arg1));
10795     case TARGET_NR_sched_yield:
10796         return get_errno(sched_yield());
10797     case TARGET_NR_sched_get_priority_max:
10798         return get_errno(sched_get_priority_max(arg1));
10799     case TARGET_NR_sched_get_priority_min:
10800         return get_errno(sched_get_priority_min(arg1));
10801 #ifdef TARGET_NR_sched_rr_get_interval
10802     case TARGET_NR_sched_rr_get_interval:
10803         {
10804             struct timespec ts;
10805             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10806             if (!is_error(ret)) {
10807                 ret = host_to_target_timespec(arg2, &ts);
10808             }
10809         }
10810         return ret;
10811 #endif
10812 #ifdef TARGET_NR_sched_rr_get_interval_time64
10813     case TARGET_NR_sched_rr_get_interval_time64:
10814         {
10815             struct timespec ts;
10816             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10817             if (!is_error(ret)) {
10818                 ret = host_to_target_timespec64(arg2, &ts);
10819             }
10820         }
10821         return ret;
10822 #endif
10823 #if defined(TARGET_NR_nanosleep)
10824     case TARGET_NR_nanosleep:
10825         {
10826             struct timespec req, rem;
10827             target_to_host_timespec(&req, arg1);
10828             ret = get_errno(safe_nanosleep(&req, &rem));
10829             if (is_error(ret) && arg2) {
10830                 host_to_target_timespec(arg2, &rem);
10831             }
10832         }
10833         return ret;
10834 #endif
10835     case TARGET_NR_prctl:
10836         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10838 #ifdef TARGET_NR_arch_prctl
10839     case TARGET_NR_arch_prctl:
10840         return do_arch_prctl(cpu_env, arg1, arg2);
10841 #endif
10842 #ifdef TARGET_NR_pread64
10843     case TARGET_NR_pread64:
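              /*
               * Targets that require 64-bit syscall arguments in aligned
               * register pairs insert a padding argument, so the offset
               * halves arrive in arg5/arg6 rather than arg4/arg5.
               */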
10844         if (regpairs_aligned(cpu_env, num)) {
10845             arg4 = arg5;
10846             arg5 = arg6;
10847         }
10848         if (arg2 == 0 && arg3 == 0) {
10849             /* Special-case NULL buffer and zero length, which should succeed */
10850             p = 0;
10851         } else {
10852             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10853             if (!p) {
10854                 return -TARGET_EFAULT;
10855             }
10856         }
10857         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10858         unlock_user(p, arg2, ret);
10859         return ret;
10860     case TARGET_NR_pwrite64:
10861         if (regpairs_aligned(cpu_env, num)) {
10862             arg4 = arg5;
10863             arg5 = arg6;
10864         }
10865         if (arg2 == 0 && arg3 == 0) {
10866             /* Special-case NULL buffer and zero length, which should succeed */
10867             p = 0;
10868         } else {
10869             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10870             if (!p) {
10871                 return -TARGET_EFAULT;
10872             }
10873         }
10874         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10875         unlock_user(p, arg2, 0);
10876         return ret;
10877 #endif
10878     case TARGET_NR_getcwd:
10879         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10880             return -TARGET_EFAULT;
10881         ret = get_errno(sys_getcwd1(p, arg2));
10882         unlock_user(p, arg1, ret);
10883         return ret;
10884     case TARGET_NR_capget:
10885     case TARGET_NR_capset:
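          /*
           * capget and capset share this implementation; "num" selects the
           * actual syscall and determines the direction of the data copies.
           */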
10886     {
10887         struct target_user_cap_header *target_header;
10888         struct target_user_cap_data *target_data = NULL;
10889         struct __user_cap_header_struct header;
10890         struct __user_cap_data_struct data[2];
10891         struct __user_cap_data_struct *dataptr = NULL;
10892         int i, target_datalen;
10893         int data_items = 1;
10894 
10895         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10896             return -TARGET_EFAULT;
10897         }
10898         header.version = tswap32(target_header->version);
10899         header.pid = tswap32(target_header->pid);
10900 
10901         if (header.version != _LINUX_CAPABILITY_VERSION) {
10902             /* Versions 2 and up take a pointer to two user_data structs */
10903             data_items = 2;
10904         }
10905 
10906         target_datalen = sizeof(*target_data) * data_items;
10907 
10908         if (arg2) {
10909             if (num == TARGET_NR_capget) {
10910                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10911             } else {
10912                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10913             }
10914             if (!target_data) {
10915                 unlock_user_struct(target_header, arg1, 0);
10916                 return -TARGET_EFAULT;
10917             }
10918 
10919             if (num == TARGET_NR_capset) {
10920                 for (i = 0; i < data_items; i++) {
10921                     data[i].effective = tswap32(target_data[i].effective);
10922                     data[i].permitted = tswap32(target_data[i].permitted);
10923                     data[i].inheritable = tswap32(target_data[i].inheritable);
10924                 }
10925             }
10926 
10927             dataptr = data;
10928         }
10929 
10930         if (num == TARGET_NR_capget) {
10931             ret = get_errno(capget(&header, dataptr));
10932         } else {
10933             ret = get_errno(capset(&header, dataptr));
10934         }
10935 
10936         /* The kernel always updates version for both capget and capset */
10937         target_header->version = tswap32(header.version);
10938         unlock_user_struct(target_header, arg1, 1);
10939 
10940         if (arg2) {
10941             if (num == TARGET_NR_capget) {
10942                 for (i = 0; i < data_items; i++) {
10943                     target_data[i].effective = tswap32(data[i].effective);
10944                     target_data[i].permitted = tswap32(data[i].permitted);
10945                     target_data[i].inheritable = tswap32(data[i].inheritable);
10946                 }
10947                 unlock_user(target_data, arg2, target_datalen);
10948             } else {
10949                 unlock_user(target_data, arg2, 0);
10950             }
10951         }
10952         return ret;
10953     }
10954     case TARGET_NR_sigaltstack:
10955         return do_sigaltstack(arg1, arg2, cpu_env);
10956 
10957 #ifdef CONFIG_SENDFILE
10958 #ifdef TARGET_NR_sendfile
10959     case TARGET_NR_sendfile:
10960     {
10961         off_t *offp = NULL;
10962         off_t off;
10963         if (arg3) {
10964             ret = get_user_sal(off, arg3);
10965             if (is_error(ret)) {
10966                 return ret;
10967             }
10968             offp = &off;
10969         }
10970         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10971         if (!is_error(ret) && arg3) {
10972             abi_long ret2 = put_user_sal(off, arg3);
10973             if (is_error(ret2)) {
10974                 ret = ret2;
10975             }
10976         }
10977         return ret;
10978     }
10979 #endif
10980 #ifdef TARGET_NR_sendfile64
10981     case TARGET_NR_sendfile64:
10982     {
10983         off_t *offp = NULL;
10984         off_t off;
10985         if (arg3) {
10986             ret = get_user_s64(off, arg3);
10987             if (is_error(ret)) {
10988                 return ret;
10989             }
10990             offp = &off;
10991         }
10992         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10993         if (!is_error(ret) && arg3) {
10994             abi_long ret2 = put_user_s64(off, arg3);
10995             if (is_error(ret2)) {
10996                 ret = ret2;
10997             }
10998         }
10999         return ret;
11000     }
11001 #endif
11002 #endif
11003 #ifdef TARGET_NR_vfork
11004     case TARGET_NR_vfork:
11005         return get_errno(do_fork(cpu_env,
11006                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11007                          0, 0, 0, 0));
11008 #endif
11009 #ifdef TARGET_NR_ugetrlimit
11010     case TARGET_NR_ugetrlimit:
11011     {
11012         struct rlimit rlim;
11013         int resource = target_to_host_resource(arg1);
11014         ret = get_errno(getrlimit(resource, &rlim));
11015         if (!is_error(ret)) {
11016             struct target_rlimit *target_rlim;
11017             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11018                 return -TARGET_EFAULT;
11019             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11020             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11021             unlock_user_struct(target_rlim, arg2, 1);
11022         }
11023         return ret;
11024     }
11025 #endif
11026 #ifdef TARGET_NR_truncate64
11027     case TARGET_NR_truncate64:
11028         if (!(p = lock_user_string(arg1)))
11029             return -TARGET_EFAULT;
11030         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11031         unlock_user(p, arg1, 0);
11032         return ret;
11033 #endif
11034 #ifdef TARGET_NR_ftruncate64
11035     case TARGET_NR_ftruncate64:
11036         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11037 #endif
11038 #ifdef TARGET_NR_stat64
11039     case TARGET_NR_stat64:
11040         if (!(p = lock_user_string(arg1))) {
11041             return -TARGET_EFAULT;
11042         }
11043         ret = get_errno(stat(path(p), &st));
11044         unlock_user(p, arg1, 0);
11045         if (!is_error(ret))
11046             ret = host_to_target_stat64(cpu_env, arg2, &st);
11047         return ret;
11048 #endif
11049 #ifdef TARGET_NR_lstat64
11050     case TARGET_NR_lstat64:
11051         if (!(p = lock_user_string(arg1))) {
11052             return -TARGET_EFAULT;
11053         }
11054         ret = get_errno(lstat(path(p), &st));
11055         unlock_user(p, arg1, 0);
11056         if (!is_error(ret))
11057             ret = host_to_target_stat64(cpu_env, arg2, &st);
11058         return ret;
11059 #endif
11060 #ifdef TARGET_NR_fstat64
11061     case TARGET_NR_fstat64:
11062         ret = get_errno(fstat(arg1, &st));
11063         if (!is_error(ret))
11064             ret = host_to_target_stat64(cpu_env, arg2, &st);
11065         return ret;
11066 #endif
11067 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11068 #ifdef TARGET_NR_fstatat64
11069     case TARGET_NR_fstatat64:
11070 #endif
11071 #ifdef TARGET_NR_newfstatat
11072     case TARGET_NR_newfstatat:
11073 #endif
11074         if (!(p = lock_user_string(arg2))) {
11075             return -TARGET_EFAULT;
11076         }
11077         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11078         unlock_user(p, arg2, 0);
11079         if (!is_error(ret))
11080             ret = host_to_target_stat64(cpu_env, arg3, &st);
11081         return ret;
11082 #endif
11083 #if defined(TARGET_NR_statx)
11084     case TARGET_NR_statx:
11085         {
11086             struct target_statx *target_stx;
11087             int dirfd = arg1;
11088             int flags = arg3;
11089 
11090             p = lock_user_string(arg2);
11091             if (p == NULL) {
11092                 return -TARGET_EFAULT;
11093             }
11094 #if defined(__NR_statx)
11095             {
11096                 /*
11097                  * It is assumed that struct statx is architecture independent.
11098                  */
11099                 struct target_statx host_stx;
11100                 int mask = arg4;
11101 
11102                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11103                 if (!is_error(ret)) {
11104                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11105                         unlock_user(p, arg2, 0);
11106                         return -TARGET_EFAULT;
11107                     }
11108                 }
11109 
11110                 if (ret != -TARGET_ENOSYS) {
11111                     unlock_user(p, arg2, 0);
11112                     return ret;
11113                 }
11114             }
11115 #endif
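                  /*
                   * Fall back to fstatat() and synthesize the target statx
                   * fields from the plain struct stat when the host statx
                   * syscall is unavailable or returns ENOSYS.
                   */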
11116             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11117             unlock_user(p, arg2, 0);
11118 
11119             if (!is_error(ret)) {
11120                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11121                     return -TARGET_EFAULT;
11122                 }
11123                 memset(target_stx, 0, sizeof(*target_stx));
11124                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11125                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11126                 __put_user(st.st_ino, &target_stx->stx_ino);
11127                 __put_user(st.st_mode, &target_stx->stx_mode);
11128                 __put_user(st.st_uid, &target_stx->stx_uid);
11129                 __put_user(st.st_gid, &target_stx->stx_gid);
11130                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11131                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11132                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11133                 __put_user(st.st_size, &target_stx->stx_size);
11134                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11135                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11136                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11137                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11138                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11139                 unlock_user_struct(target_stx, arg5, 1);
11140             }
11141         }
11142         return ret;
11143 #endif
11144 #ifdef TARGET_NR_lchown
11145     case TARGET_NR_lchown:
11146         if (!(p = lock_user_string(arg1)))
11147             return -TARGET_EFAULT;
11148         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11149         unlock_user(p, arg1, 0);
11150         return ret;
11151 #endif
11152 #ifdef TARGET_NR_getuid
11153     case TARGET_NR_getuid:
11154         return get_errno(high2lowuid(getuid()));
11155 #endif
11156 #ifdef TARGET_NR_getgid
11157     case TARGET_NR_getgid:
11158         return get_errno(high2lowgid(getgid()));
11159 #endif
11160 #ifdef TARGET_NR_geteuid
11161     case TARGET_NR_geteuid:
11162         return get_errno(high2lowuid(geteuid()));
11163 #endif
11164 #ifdef TARGET_NR_getegid
11165     case TARGET_NR_getegid:
11166         return get_errno(high2lowgid(getegid()));
11167 #endif
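          /*
           * The legacy uid/gid syscalls below use 16-bit IDs on some
           * targets; the low2high and high2low helpers convert between
           * those truncated target IDs and the host's full-width IDs.
           * The *32 variants further down pass values through unchanged.
           */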
11168     case TARGET_NR_setreuid:
11169         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11170     case TARGET_NR_setregid:
11171         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11172     case TARGET_NR_getgroups:
11173         {
11174             int gidsetsize = arg1;
11175             target_id *target_grouplist;
11176             gid_t *grouplist;
11177             int i;
11178 
11179             grouplist = alloca(gidsetsize * sizeof(gid_t));
11180             ret = get_errno(getgroups(gidsetsize, grouplist));
11181             if (gidsetsize == 0)
11182                 return ret;
11183             if (!is_error(ret)) {
11184                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11185                 if (!target_grouplist)
11186                     return -TARGET_EFAULT;
11187                 for (i = 0; i < ret; i++)
11188                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11189                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11190             }
11191         }
11192         return ret;
11193     case TARGET_NR_setgroups:
11194         {
11195             int gidsetsize = arg1;
11196             target_id *target_grouplist;
11197             gid_t *grouplist = NULL;
11198             int i;
11199             if (gidsetsize) {
11200                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11201                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11202                 if (!target_grouplist) {
11203                     return -TARGET_EFAULT;
11204                 }
11205                 for (i = 0; i < gidsetsize; i++) {
11206                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11207                 }
11208                 unlock_user(target_grouplist, arg2, 0);
11209             }
11210             return get_errno(setgroups(gidsetsize, grouplist));
11211         }
11212     case TARGET_NR_fchown:
11213         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11214 #if defined(TARGET_NR_fchownat)
11215     case TARGET_NR_fchownat:
11216         if (!(p = lock_user_string(arg2)))
11217             return -TARGET_EFAULT;
11218         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11219                                  low2highgid(arg4), arg5));
11220         unlock_user(p, arg2, 0);
11221         return ret;
11222 #endif
11223 #ifdef TARGET_NR_setresuid
11224     case TARGET_NR_setresuid:
11225         return get_errno(sys_setresuid(low2highuid(arg1),
11226                                        low2highuid(arg2),
11227                                        low2highuid(arg3)));
11228 #endif
11229 #ifdef TARGET_NR_getresuid
11230     case TARGET_NR_getresuid:
11231         {
11232             uid_t ruid, euid, suid;
11233             ret = get_errno(getresuid(&ruid, &euid, &suid));
11234             if (!is_error(ret)) {
11235                 if (put_user_id(high2lowuid(ruid), arg1)
11236                     || put_user_id(high2lowuid(euid), arg2)
11237                     || put_user_id(high2lowuid(suid), arg3))
11238                     return -TARGET_EFAULT;
11239             }
11240         }
11241         return ret;
11242 #endif
11243 #ifdef TARGET_NR_getresgid
11244     case TARGET_NR_setresgid:
11245         return get_errno(sys_setresgid(low2highgid(arg1),
11246                                        low2highgid(arg2),
11247                                        low2highgid(arg3)));
11248 #endif
11249 #ifdef TARGET_NR_getresgid
11250     case TARGET_NR_getresgid:
11251         {
11252             gid_t rgid, egid, sgid;
11253             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11254             if (!is_error(ret)) {
11255                 if (put_user_id(high2lowgid(rgid), arg1)
11256                     || put_user_id(high2lowgid(egid), arg2)
11257                     || put_user_id(high2lowgid(sgid), arg3))
11258                     return -TARGET_EFAULT;
11259             }
11260         }
11261         return ret;
11262 #endif
11263 #ifdef TARGET_NR_chown
11264     case TARGET_NR_chown:
11265         if (!(p = lock_user_string(arg1)))
11266             return -TARGET_EFAULT;
11267         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11268         unlock_user(p, arg1, 0);
11269         return ret;
11270 #endif
11271     case TARGET_NR_setuid:
11272         return get_errno(sys_setuid(low2highuid(arg1)));
11273     case TARGET_NR_setgid:
11274         return get_errno(sys_setgid(low2highgid(arg1)));
11275     case TARGET_NR_setfsuid:
11276         return get_errno(setfsuid(arg1));
11277     case TARGET_NR_setfsgid:
11278         return get_errno(setfsgid(arg1));
11279 
11280 #ifdef TARGET_NR_lchown32
11281     case TARGET_NR_lchown32:
11282         if (!(p = lock_user_string(arg1)))
11283             return -TARGET_EFAULT;
11284         ret = get_errno(lchown(p, arg2, arg3));
11285         unlock_user(p, arg1, 0);
11286         return ret;
11287 #endif
11288 #ifdef TARGET_NR_getuid32
11289     case TARGET_NR_getuid32:
11290         return get_errno(getuid());
11291 #endif
11292 
11293 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11294     /* Alpha specific */
11295     case TARGET_NR_getxuid:
11296         {
11297             uid_t euid;
11298             euid = geteuid();
11299             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11300         }
11301         return get_errno(getuid());
11302 #endif
11303 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11304     /* Alpha specific */
11305     case TARGET_NR_getxgid:
11306         {
11307             gid_t egid;
11308             egid = getegid();
11309             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11310         }
11311         return get_errno(getgid());
11312 #endif
11313 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11314     /* Alpha specific */
11315     case TARGET_NR_osf_getsysinfo:
11316         ret = -TARGET_EOPNOTSUPP;
11317         switch (arg1) {
11318           case TARGET_GSI_IEEE_FP_CONTROL:
11319             {
11320                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11321                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11322 
11323                 swcr &= ~SWCR_STATUS_MASK;
11324                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11325 
11326                 if (put_user_u64 (swcr, arg2))
11327                         return -TARGET_EFAULT;
11328                 ret = 0;
11329             }
11330             break;
11331 
11332           /* case GSI_IEEE_STATE_AT_SIGNAL:
11333              -- Not implemented in linux kernel.
11334              case GSI_UACPROC:
11335              -- Retrieves current unaligned access state; not much used.
11336              case GSI_PROC_TYPE:
11337              -- Retrieves implver information; surely not used.
11338              case GSI_GET_HWRPB:
11339              -- Grabs a copy of the HWRPB; surely not used.
11340           */
11341         }
11342         return ret;
11343 #endif
11344 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11345     /* Alpha specific */
11346     case TARGET_NR_osf_setsysinfo:
11347         ret = -TARGET_EOPNOTSUPP;
11348         switch (arg1) {
11349           case TARGET_SSI_IEEE_FP_CONTROL:
11350             {
11351                 uint64_t swcr, fpcr;
11352 
11353                 if (get_user_u64 (swcr, arg2)) {
11354                     return -TARGET_EFAULT;
11355                 }
11356 
11357                 /*
11358                  * The kernel calls swcr_update_status to update the
11359                  * status bits from the fpcr at every point that it
11360                  * could be queried.  Therefore, we store the status
11361                  * bits only in FPCR.
11362                  */
11363                 ((CPUAlphaState *)cpu_env)->swcr
11364                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11365 
11366                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11367                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11368                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11369                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11370                 ret = 0;
11371             }
11372             break;
11373 
11374           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11375             {
11376                 uint64_t exc, fpcr, fex;
11377 
11378                 if (get_user_u64(exc, arg2)) {
11379                     return -TARGET_EFAULT;
11380                 }
11381                 exc &= SWCR_STATUS_MASK;
11382                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11383 
11384                 /* Old exceptions are not signaled.  */
11385                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11386                 fex = exc & ~fex;
11387                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11388                 fex &= ((CPUArchState *)cpu_env)->swcr;
11389 
11390                 /* Update the hardware fpcr.  */
11391                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11392                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11393 
11394                 if (fex) {
11395                     int si_code = TARGET_FPE_FLTUNK;
11396                     target_siginfo_t info;
11397 
11398                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11399                         si_code = TARGET_FPE_FLTUND;
11400                     }
11401                     if (fex & SWCR_TRAP_ENABLE_INE) {
11402                         si_code = TARGET_FPE_FLTRES;
11403                     }
11404                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11405                         si_code = TARGET_FPE_FLTUND;
11406                     }
11407                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11408                         si_code = TARGET_FPE_FLTOVF;
11409                     }
11410                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11411                         si_code = TARGET_FPE_FLTDIV;
11412                     }
11413                     if (fex & SWCR_TRAP_ENABLE_INV) {
11414                         si_code = TARGET_FPE_FLTINV;
11415                     }
11416 
11417                     info.si_signo = SIGFPE;
11418                     info.si_errno = 0;
11419                     info.si_code = si_code;
11420                     info._sifields._sigfault._addr
11421                         = ((CPUArchState *)cpu_env)->pc;
11422                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11423                                  QEMU_SI_FAULT, &info);
11424                 }
11425                 ret = 0;
11426             }
11427             break;
11428 
11429           /* case SSI_NVPAIRS:
11430              -- Used with SSIN_UACPROC to enable unaligned accesses.
11431              case SSI_IEEE_STATE_AT_SIGNAL:
11432              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11433              -- Not implemented in linux kernel
11434           */
11435         }
11436         return ret;
11437 #endif
11438 #ifdef TARGET_NR_osf_sigprocmask
11439     /* Alpha specific.  */
11440     case TARGET_NR_osf_sigprocmask:
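              /*
               * The OSF variant passes the signal mask by value in arg2 and
               * returns the previous mask as the syscall result instead of
               * through a user pointer.
               */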
11441         {
11442             abi_ulong mask;
11443             int how;
11444             sigset_t set, oldset;
11445 
11446             switch (arg1) {
11447             case TARGET_SIG_BLOCK:
11448                 how = SIG_BLOCK;
11449                 break;
11450             case TARGET_SIG_UNBLOCK:
11451                 how = SIG_UNBLOCK;
11452                 break;
11453             case TARGET_SIG_SETMASK:
11454                 how = SIG_SETMASK;
11455                 break;
11456             default:
11457                 return -TARGET_EINVAL;
11458             }
11459             mask = arg2;
11460             target_to_host_old_sigset(&set, &mask);
11461             ret = do_sigprocmask(how, &set, &oldset);
11462             if (!ret) {
11463                 host_to_target_old_sigset(&mask, &oldset);
11464                 ret = mask;
11465             }
11466         }
11467         return ret;
11468 #endif
11469 
11470 #ifdef TARGET_NR_getgid32
11471     case TARGET_NR_getgid32:
11472         return get_errno(getgid());
11473 #endif
11474 #ifdef TARGET_NR_geteuid32
11475     case TARGET_NR_geteuid32:
11476         return get_errno(geteuid());
11477 #endif
11478 #ifdef TARGET_NR_getegid32
11479     case TARGET_NR_getegid32:
11480         return get_errno(getegid());
11481 #endif
11482 #ifdef TARGET_NR_setreuid32
11483     case TARGET_NR_setreuid32:
11484         return get_errno(setreuid(arg1, arg2));
11485 #endif
11486 #ifdef TARGET_NR_setregid32
11487     case TARGET_NR_setregid32:
11488         return get_errno(setregid(arg1, arg2));
11489 #endif
11490 #ifdef TARGET_NR_getgroups32
11491     case TARGET_NR_getgroups32:
11492         {
11493             int gidsetsize = arg1;
11494             uint32_t *target_grouplist;
11495             gid_t *grouplist;
11496             int i;
11497 
11498             grouplist = alloca(gidsetsize * sizeof(gid_t));
11499             ret = get_errno(getgroups(gidsetsize, grouplist));
11500             if (gidsetsize == 0)
11501                 return ret;
11502             if (!is_error(ret)) {
11503                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11504                 if (!target_grouplist) {
11505                     return -TARGET_EFAULT;
11506                 }
11507                 for (i = 0; i < ret; i++)
11508                     target_grouplist[i] = tswap32(grouplist[i]);
11509                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11510             }
11511         }
11512         return ret;
11513 #endif
11514 #ifdef TARGET_NR_setgroups32
11515     case TARGET_NR_setgroups32:
11516         {
11517             int gidsetsize = arg1;
11518             uint32_t *target_grouplist;
11519             gid_t *grouplist;
11520             int i;
11521 
11522             grouplist = alloca(gidsetsize * sizeof(gid_t));
11523             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11524             if (!target_grouplist) {
11525                 return -TARGET_EFAULT;
11526             }
11527             for (i = 0; i < gidsetsize; i++)
11528                 grouplist[i] = tswap32(target_grouplist[i]);
11529             unlock_user(target_grouplist, arg2, 0);
11530             return get_errno(setgroups(gidsetsize, grouplist));
11531         }
11532 #endif
11533 #ifdef TARGET_NR_fchown32
11534     case TARGET_NR_fchown32:
11535         return get_errno(fchown(arg1, arg2, arg3));
11536 #endif
11537 #ifdef TARGET_NR_setresuid32
11538     case TARGET_NR_setresuid32:
11539         return get_errno(sys_setresuid(arg1, arg2, arg3));
11540 #endif
11541 #ifdef TARGET_NR_getresuid32
11542     case TARGET_NR_getresuid32:
11543         {
11544             uid_t ruid, euid, suid;
11545             ret = get_errno(getresuid(&ruid, &euid, &suid));
11546             if (!is_error(ret)) {
11547                 if (put_user_u32(ruid, arg1)
11548                     || put_user_u32(euid, arg2)
11549                     || put_user_u32(suid, arg3))
11550                     return -TARGET_EFAULT;
11551             }
11552         }
11553         return ret;
11554 #endif
11555 #ifdef TARGET_NR_setresgid32
11556     case TARGET_NR_setresgid32:
11557         return get_errno(sys_setresgid(arg1, arg2, arg3));
11558 #endif
11559 #ifdef TARGET_NR_getresgid32
11560     case TARGET_NR_getresgid32:
11561         {
11562             gid_t rgid, egid, sgid;
11563             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11564             if (!is_error(ret)) {
11565                 if (put_user_u32(rgid, arg1)
11566                     || put_user_u32(egid, arg2)
11567                     || put_user_u32(sgid, arg3))
11568                     return -TARGET_EFAULT;
11569             }
11570         }
11571         return ret;
11572 #endif
11573 #ifdef TARGET_NR_chown32
11574     case TARGET_NR_chown32:
11575         if (!(p = lock_user_string(arg1)))
11576             return -TARGET_EFAULT;
11577         ret = get_errno(chown(p, arg2, arg3));
11578         unlock_user(p, arg1, 0);
11579         return ret;
11580 #endif
11581 #ifdef TARGET_NR_setuid32
11582     case TARGET_NR_setuid32:
11583         return get_errno(sys_setuid(arg1));
11584 #endif
11585 #ifdef TARGET_NR_setgid32
11586     case TARGET_NR_setgid32:
11587         return get_errno(sys_setgid(arg1));
11588 #endif
11589 #ifdef TARGET_NR_setfsuid32
11590     case TARGET_NR_setfsuid32:
11591         return get_errno(setfsuid(arg1));
11592 #endif
11593 #ifdef TARGET_NR_setfsgid32
11594     case TARGET_NR_setfsgid32:
11595         return get_errno(setfsgid(arg1));
11596 #endif
11597 #ifdef TARGET_NR_mincore
11598     case TARGET_NR_mincore:
11599         {
11600             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11601             if (!a) {
11602                 return -TARGET_ENOMEM;
11603             }
11604             p = lock_user_string(arg3);
11605             if (!p) {
11606                 ret = -TARGET_EFAULT;
11607             } else {
11608                 ret = get_errno(mincore(a, arg2, p));
11609                 unlock_user(p, arg3, ret);
11610             }
11611             unlock_user(a, arg1, 0);
11612         }
11613         return ret;
11614 #endif
11615 #ifdef TARGET_NR_arm_fadvise64_64
11616     case TARGET_NR_arm_fadvise64_64:
11617         /* arm_fadvise64_64 looks like fadvise64_64 but
11618          * with different argument order: fd, advice, offset, len
11619          * rather than the usual fd, offset, len, advice.
11620          * Note that offset and len are both 64-bit so appear as
11621          * pairs of 32-bit registers.
11622          */
11623         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11624                             target_offset64(arg5, arg6), arg2);
11625         return -host_to_target_errno(ret);
11626 #endif
11627 
11628 #if TARGET_ABI_BITS == 32
11629 
11630 #ifdef TARGET_NR_fadvise64_64
11631     case TARGET_NR_fadvise64_64:
11632 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11633         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11634         ret = arg2;
11635         arg2 = arg3;
11636         arg3 = arg4;
11637         arg4 = arg5;
11638         arg5 = arg6;
11639         arg6 = ret;
11640 #else
11641         /* 6 args: fd, offset (high, low), len (high, low), advice */
11642         if (regpairs_aligned(cpu_env, num)) {
11643             /* offset is in (3,4), len in (5,6) and advice in 7 */
11644             arg2 = arg3;
11645             arg3 = arg4;
11646             arg4 = arg5;
11647             arg5 = arg6;
11648             arg6 = arg7;
11649         }
11650 #endif
11651         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11652                             target_offset64(arg4, arg5), arg6);
11653         return -host_to_target_errno(ret);
11654 #endif
11655 
11656 #ifdef TARGET_NR_fadvise64
11657     case TARGET_NR_fadvise64:
11658         /* 5 args: fd, offset (high, low), len, advice */
11659         if (regpairs_aligned(cpu_env, num)) {
11660             /* offset is in (3,4), len in 5 and advice in 6 */
11661             arg2 = arg3;
11662             arg3 = arg4;
11663             arg4 = arg5;
11664             arg5 = arg6;
11665         }
11666         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11667         return -host_to_target_errno(ret);
11668 #endif
11669 
11670 #else /* not a 32-bit ABI */
11671 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11672 #ifdef TARGET_NR_fadvise64_64
11673     case TARGET_NR_fadvise64_64:
11674 #endif
11675 #ifdef TARGET_NR_fadvise64
11676     case TARGET_NR_fadvise64:
11677 #endif
11678 #ifdef TARGET_S390X
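              /*
               * The s390x ABI defines POSIX_FADV_DONTNEED and
               * POSIX_FADV_NOREUSE as 6 and 7, so remap those to the host
               * values; guest values 4 and 5 have no s390x meaning and are
               * turned into deliberately invalid advice.
               */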
11679         switch (arg4) {
11680         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11681         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11682         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11683         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11684         default: break;
11685         }
11686 #endif
11687         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11688 #endif
11689 #endif /* end of 64-bit ABI fadvise handling */
11690 
11691 #ifdef TARGET_NR_madvise
11692     case TARGET_NR_madvise:
11693         /* A straight passthrough may not be safe because qemu sometimes
11694            turns private file-backed mappings into anonymous mappings.
11695            This will break MADV_DONTNEED.
11696            This is a hint, so ignoring and returning success is ok.  */
11697         return 0;
11698 #endif
11699 #ifdef TARGET_NR_fcntl64
11700     case TARGET_NR_fcntl64:
11701     {
11702         int cmd;
11703         struct flock64 fl;
11704         from_flock64_fn *copyfrom = copy_from_user_flock64;
11705         to_flock64_fn *copyto = copy_to_user_flock64;
11706 
11707 #ifdef TARGET_ARM
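              /*
               * ARM OABI guests lay out struct flock64 differently from
               * EABI, so use the OABI copy helpers for them.
               */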
11708         if (!((CPUARMState *)cpu_env)->eabi) {
11709             copyfrom = copy_from_user_oabi_flock64;
11710             copyto = copy_to_user_oabi_flock64;
11711         }
11712 #endif
11713 
11714         cmd = target_to_host_fcntl_cmd(arg2);
11715         if (cmd == -TARGET_EINVAL) {
11716             return cmd;
11717         }
11718 
11719         switch (arg2) {
11720         case TARGET_F_GETLK64:
11721             ret = copyfrom(&fl, arg3);
11722             if (ret) {
11723                 break;
11724             }
11725             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11726             if (ret == 0) {
11727                 ret = copyto(arg3, &fl);
11728             }
11729             break;
11730 
11731         case TARGET_F_SETLK64:
11732         case TARGET_F_SETLKW64:
11733             ret = copyfrom(&fl, arg3);
11734             if (ret) {
11735                 break;
11736             }
11737             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11738             break;
11739         default:
11740             ret = do_fcntl(arg1, arg2, arg3);
11741             break;
11742         }
11743         return ret;
11744     }
11745 #endif
11746 #ifdef TARGET_NR_cacheflush
11747     case TARGET_NR_cacheflush:
11748         /* self-modifying code is handled automatically, so nothing needed */
11749         return 0;
11750 #endif
11751 #ifdef TARGET_NR_getpagesize
11752     case TARGET_NR_getpagesize:
11753         return TARGET_PAGE_SIZE;
11754 #endif
11755     case TARGET_NR_gettid:
11756         return get_errno(sys_gettid());
11757 #ifdef TARGET_NR_readahead
11758     case TARGET_NR_readahead:
11759 #if TARGET_ABI_BITS == 32
11760         if (regpairs_aligned(cpu_env, num)) {
11761             arg2 = arg3;
11762             arg3 = arg4;
11763             arg4 = arg5;
11764         }
11765         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11766 #else
11767         ret = get_errno(readahead(arg1, arg2, arg3));
11768 #endif
11769         return ret;
11770 #endif
11771 #ifdef CONFIG_ATTR
11772 #ifdef TARGET_NR_setxattr
11773     case TARGET_NR_listxattr:
11774     case TARGET_NR_llistxattr:
11775     {
11776         void *p, *b = 0;
11777         if (arg2) {
11778             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11779             if (!b) {
11780                 return -TARGET_EFAULT;
11781             }
11782         }
11783         p = lock_user_string(arg1);
11784         if (p) {
11785             if (num == TARGET_NR_listxattr) {
11786                 ret = get_errno(listxattr(p, b, arg3));
11787             } else {
11788                 ret = get_errno(llistxattr(p, b, arg3));
11789             }
11790         } else {
11791             ret = -TARGET_EFAULT;
11792         }
11793         unlock_user(p, arg1, 0);
11794         unlock_user(b, arg2, arg3);
11795         return ret;
11796     }
11797     case TARGET_NR_flistxattr:
11798     {
11799         void *b = 0;
11800         if (arg2) {
11801             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11802             if (!b) {
11803                 return -TARGET_EFAULT;
11804             }
11805         }
11806         ret = get_errno(flistxattr(arg1, b, arg3));
11807         unlock_user(b, arg2, arg3);
11808         return ret;
11809     }
11810     case TARGET_NR_setxattr:
11811     case TARGET_NR_lsetxattr:
11812         {
11813             void *p, *n, *v = 0;
11814             if (arg3) {
11815                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11816                 if (!v) {
11817                     return -TARGET_EFAULT;
11818                 }
11819             }
11820             p = lock_user_string(arg1);
11821             n = lock_user_string(arg2);
11822             if (p && n) {
11823                 if (num == TARGET_NR_setxattr) {
11824                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11825                 } else {
11826                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11827                 }
11828             } else {
11829                 ret = -TARGET_EFAULT;
11830             }
11831             unlock_user(p, arg1, 0);
11832             unlock_user(n, arg2, 0);
11833             unlock_user(v, arg3, 0);
11834         }
11835         return ret;
11836     case TARGET_NR_fsetxattr:
11837         {
11838             void *n, *v = 0;
11839             if (arg3) {
11840                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11841                 if (!v) {
11842                     return -TARGET_EFAULT;
11843                 }
11844             }
11845             n = lock_user_string(arg2);
11846             if (n) {
11847                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11848             } else {
11849                 ret = -TARGET_EFAULT;
11850             }
11851             unlock_user(n, arg2, 0);
11852             unlock_user(v, arg3, 0);
11853         }
11854         return ret;
11855     case TARGET_NR_getxattr:
11856     case TARGET_NR_lgetxattr:
11857         {
11858             void *p, *n, *v = 0;
11859             if (arg3) {
11860                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11861                 if (!v) {
11862                     return -TARGET_EFAULT;
11863                 }
11864             }
11865             p = lock_user_string(arg1);
11866             n = lock_user_string(arg2);
11867             if (p && n) {
11868                 if (num == TARGET_NR_getxattr) {
11869                     ret = get_errno(getxattr(p, n, v, arg4));
11870                 } else {
11871                     ret = get_errno(lgetxattr(p, n, v, arg4));
11872                 }
11873             } else {
11874                 ret = -TARGET_EFAULT;
11875             }
11876             unlock_user(p, arg1, 0);
11877             unlock_user(n, arg2, 0);
11878             unlock_user(v, arg3, arg4);
11879         }
11880         return ret;
11881     case TARGET_NR_fgetxattr:
11882         {
11883             void *n, *v = 0;
11884             if (arg3) {
11885                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11886                 if (!v) {
11887                     return -TARGET_EFAULT;
11888                 }
11889             }
11890             n = lock_user_string(arg2);
11891             if (n) {
11892                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11893             } else {
11894                 ret = -TARGET_EFAULT;
11895             }
11896             unlock_user(n, arg2, 0);
11897             unlock_user(v, arg3, arg4);
11898         }
11899         return ret;
11900     case TARGET_NR_removexattr:
11901     case TARGET_NR_lremovexattr:
11902         {
11903             void *p, *n;
11904             p = lock_user_string(arg1);
11905             n = lock_user_string(arg2);
11906             if (p && n) {
11907                 if (num == TARGET_NR_removexattr) {
11908                     ret = get_errno(removexattr(p, n));
11909                 } else {
11910                     ret = get_errno(lremovexattr(p, n));
11911                 }
11912             } else {
11913                 ret = -TARGET_EFAULT;
11914             }
11915             unlock_user(p, arg1, 0);
11916             unlock_user(n, arg2, 0);
11917         }
11918         return ret;
11919     case TARGET_NR_fremovexattr:
11920         {
11921             void *n;
11922             n = lock_user_string(arg2);
11923             if (n) {
11924                 ret = get_errno(fremovexattr(arg1, n));
11925             } else {
11926                 ret = -TARGET_EFAULT;
11927             }
11928             unlock_user(n, arg2, 0);
11929         }
11930         return ret;
11931 #endif
11932 #endif /* CONFIG_ATTR */
11933 #ifdef TARGET_NR_set_thread_area
11934     case TARGET_NR_set_thread_area:
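          /*
           * Each target stores the thread-local pointer in its own way:
           * MIPS and CRIS keep it in a CPU register, i386 goes through
           * do_set_thread_area(), m68k keeps it in the TaskState, and
           * everything else reports ENOSYS.
           */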
11935 #if defined(TARGET_MIPS)
11936       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11937       return 0;
11938 #elif defined(TARGET_CRIS)
11939       if (arg1 & 0xff) {
11940           ret = -TARGET_EINVAL;
11941       } else {
11942           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11943           ret = 0;
11944       }
11945       return ret;
11946 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11947       return do_set_thread_area(cpu_env, arg1);
11948 #elif defined(TARGET_M68K)
11949       {
11950           TaskState *ts = cpu->opaque;
11951           ts->tp_value = arg1;
11952           return 0;
11953       }
11954 #else
11955       return -TARGET_ENOSYS;
11956 #endif
11957 #endif
11958 #ifdef TARGET_NR_get_thread_area
11959     case TARGET_NR_get_thread_area:
11960 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11961         return do_get_thread_area(cpu_env, arg1);
11962 #elif defined(TARGET_M68K)
11963         {
11964             TaskState *ts = cpu->opaque;
11965             return ts->tp_value;
11966         }
11967 #else
11968         return -TARGET_ENOSYS;
11969 #endif
11970 #endif
11971 #ifdef TARGET_NR_getdomainname
11972     case TARGET_NR_getdomainname:
11973         return -TARGET_ENOSYS;
11974 #endif
11975 
11976 #ifdef TARGET_NR_clock_settime
11977     case TARGET_NR_clock_settime:
11978     {
11979         struct timespec ts;
11980 
11981         ret = target_to_host_timespec(&ts, arg2);
11982         if (!is_error(ret)) {
11983             ret = get_errno(clock_settime(arg1, &ts));
11984         }
11985         return ret;
11986     }
11987 #endif
11988 #ifdef TARGET_NR_clock_settime64
11989     case TARGET_NR_clock_settime64:
11990     {
11991         struct timespec ts;
11992 
11993         ret = target_to_host_timespec64(&ts, arg2);
11994         if (!is_error(ret)) {
11995             ret = get_errno(clock_settime(arg1, &ts));
11996         }
11997         return ret;
11998     }
11999 #endif
12000 #ifdef TARGET_NR_clock_gettime
12001     case TARGET_NR_clock_gettime:
12002     {
12003         struct timespec ts;
12004         ret = get_errno(clock_gettime(arg1, &ts));
12005         if (!is_error(ret)) {
12006             ret = host_to_target_timespec(arg2, &ts);
12007         }
12008         return ret;
12009     }
12010 #endif
12011 #ifdef TARGET_NR_clock_gettime64
12012     case TARGET_NR_clock_gettime64:
12013     {
12014         struct timespec ts;
12015         ret = get_errno(clock_gettime(arg1, &ts));
12016         if (!is_error(ret)) {
12017             ret = host_to_target_timespec64(arg2, &ts);
12018         }
12019         return ret;
12020     }
12021 #endif
12022 #ifdef TARGET_NR_clock_getres
12023     case TARGET_NR_clock_getres:
12024     {
12025         struct timespec ts;
12026         ret = get_errno(clock_getres(arg1, &ts));
12027         if (!is_error(ret)) {
12028             host_to_target_timespec(arg2, &ts);
12029         }
12030         return ret;
12031     }
12032 #endif
12033 #ifdef TARGET_NR_clock_getres_time64
12034     case TARGET_NR_clock_getres_time64:
12035     {
12036         struct timespec ts;
12037         ret = get_errno(clock_getres(arg1, &ts));
12038         if (!is_error(ret)) {
12039             host_to_target_timespec64(arg2, &ts);
12040         }
12041         return ret;
12042     }
12043 #endif
12044 #ifdef TARGET_NR_clock_nanosleep
12045     case TARGET_NR_clock_nanosleep:
12046     {
12047         struct timespec ts;
12048         if (target_to_host_timespec(&ts, arg3)) {
12049             return -TARGET_EFAULT;
12050         }
12051         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12052                                              &ts, arg4 ? &ts : NULL));
12053         /*
12054          * If the call is interrupted by a signal handler, it fails with
12055          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12056          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12057          */
12058         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12059             host_to_target_timespec(arg4, &ts)) {
12060               return -TARGET_EFAULT;
12061         }
12062 
12063         return ret;
12064     }
12065 #endif
12066 #ifdef TARGET_NR_clock_nanosleep_time64
12067     case TARGET_NR_clock_nanosleep_time64:
12068     {
12069         struct timespec ts;
12070 
12071         if (target_to_host_timespec64(&ts, arg3)) {
12072             return -TARGET_EFAULT;
12073         }
12074 
12075         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12076                                              &ts, arg4 ? &ts : NULL));
12077 
12078         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12079             host_to_target_timespec64(arg4, &ts)) {
12080             return -TARGET_EFAULT;
12081         }
12082         return ret;
12083     }
12084 #endif
12085 
12086 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12087     case TARGET_NR_set_tid_address:
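              /*
               * The kernel only records this address (clearing it and doing
               * a futex wake there on thread exit), so the guest pointer can
               * be translated with g2h() and passed straight to the host.
               */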
12088         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12089 #endif
12090 
12091     case TARGET_NR_tkill:
12092         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12093 
12094     case TARGET_NR_tgkill:
12095         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12096                          target_to_host_signal(arg3)));
12097 
12098 #ifdef TARGET_NR_set_robust_list
12099     case TARGET_NR_set_robust_list:
12100     case TARGET_NR_get_robust_list:
12101         /* The ABI for supporting robust futexes has userspace pass
12102          * the kernel a pointer to a linked list which is updated by
12103          * userspace after the syscall; the list is walked by the kernel
12104          * when the thread exits. Since the linked list in QEMU guest
12105          * memory isn't a valid linked list for the host and we have
12106          * no way to reliably intercept the thread-death event, we can't
12107          * support these. Silently return ENOSYS so that guest userspace
12108          * falls back to a non-robust futex implementation (which should
12109          * be OK except in the corner case of the guest crashing while
12110          * holding a mutex that is shared with another process via
12111          * shared memory).
12112          */
12113         return -TARGET_ENOSYS;
12114 #endif
12115 
12116 #if defined(TARGET_NR_utimensat)
12117     case TARGET_NR_utimensat:
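              /*
               * A NULL times pointer asks the kernel to set both timestamps
               * to the current time; otherwise arg3 points to an array of
               * two target timespecs (atime, mtime) that are converted here.
               */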
12118         {
12119             struct timespec *tsp, ts[2];
12120             if (!arg3) {
12121                 tsp = NULL;
12122             } else {
12123                 if (target_to_host_timespec(ts, arg3)) {
12124                     return -TARGET_EFAULT;
12125                 }
12126                 if (target_to_host_timespec(ts + 1, arg3 +
12127                                             sizeof(struct target_timespec))) {
12128                     return -TARGET_EFAULT;
12129                 }
12130                 tsp = ts;
12131             }
12132             if (!arg2) {
12133                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12134             } else {
12135                 if (!(p = lock_user_string(arg2))) {
12136                     return -TARGET_EFAULT;
12137                 }
12138                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12139                 unlock_user(p, arg2, 0);
12140             }
12141         }
12142         return ret;
12143 #endif
12144 #ifdef TARGET_NR_utimensat_time64
12145     case TARGET_NR_utimensat_time64:
12146         {
12147             struct timespec *tsp, ts[2];
12148             if (!arg3) {
12149                 tsp = NULL;
12150             } else {
12151                 if (target_to_host_timespec64(ts, arg3)) {
12152                     return -TARGET_EFAULT;
12153                 }
12154                 if (target_to_host_timespec64(ts + 1, arg3 +
12155                                      sizeof(struct target__kernel_timespec))) {
12156                     return -TARGET_EFAULT;
12157                 }
12158                 tsp = ts;
12159             }
12160             if (!arg2) {
12161                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12162             } else {
12163                 p = lock_user_string(arg2);
12164                 if (!p) {
12165                     return -TARGET_EFAULT;
12166                 }
12167                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12168                 unlock_user(p, arg2, 0);
12169             }
12170         }
12171         return ret;
12172 #endif
12173 #ifdef TARGET_NR_futex
12174     case TARGET_NR_futex:
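        /*
         * Operation decoding and timeout conversion are handled inside
         * do_futex(); the time64 variant below differs only in the
         * timespec layout it expects from the guest.
         */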
12175         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12176 #endif
12177 #ifdef TARGET_NR_futex_time64
12178     case TARGET_NR_futex_time64:
12179         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12180 #endif
12181 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12182     case TARGET_NR_inotify_init:
12183         ret = get_errno(sys_inotify_init());
12184         if (ret >= 0) {
12185             fd_trans_register(ret, &target_inotify_trans);
12186         }
12187         return ret;
12188 #endif
12189 #ifdef CONFIG_INOTIFY1
12190 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12191     case TARGET_NR_inotify_init1:
12192         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12193                                           fcntl_flags_tbl)));
12194         if (ret >= 0) {
12195             fd_trans_register(ret, &target_inotify_trans);
12196         }
12197         return ret;
12198 #endif
12199 #endif
12200 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12201     case TARGET_NR_inotify_add_watch:
12202         p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
12203         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12204         unlock_user(p, arg2, 0);
12205         return ret;
12206 #endif
12207 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12208     case TARGET_NR_inotify_rm_watch:
12209         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12210 #endif
12211 
12212 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12213     case TARGET_NR_mq_open:
12214         {
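            /*
             * Both the open flags and the optional mq_attr arrive in guest
             * format: the flags are mapped through fcntl_flags_tbl and the
             * attributes are byte-swapped by copy_from_user_mq_attr().
             */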
12215             struct mq_attr posix_mq_attr;
12216             struct mq_attr *pposix_mq_attr;
12217             int host_flags;
12218 
12219             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12220             pposix_mq_attr = NULL;
12221             if (arg4) {
12222                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12223                     return -TARGET_EFAULT;
12224                 }
12225                 pposix_mq_attr = &posix_mq_attr;
12226             }
12227             p = lock_user_string(arg1 - 1);
12228             if (!p) {
12229                 return -TARGET_EFAULT;
12230             }
12231             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12232             unlock_user(p, arg1, 0);
12233         }
12234         return ret;
12235 
12236     case TARGET_NR_mq_unlink:
12237         p = lock_user_string(arg1 - 1);
12238         if (!p) {
12239             return -TARGET_EFAULT;
12240         }
12241         ret = get_errno(mq_unlink(p));
12242         unlock_user(p, arg1, 0);
12243         return ret;
12244 
12245 #ifdef TARGET_NR_mq_timedsend
12246     case TARGET_NR_mq_timedsend:
12247         {
12248             struct timespec ts;
12249 
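            /*
             * arg5, when non-NULL, is an absolute CLOCK_REALTIME deadline
             * in the guest's timespec layout; NULL means block indefinitely.
             */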
12250             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12251             if (arg5 != 0) {
12252                 if (target_to_host_timespec(&ts, arg5)) {
12253                     return -TARGET_EFAULT;
12254                 }
12255                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12256                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12257                     return -TARGET_EFAULT;
12258                 }
12259             } else {
12260                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12261             }
12262             unlock_user(p, arg2, arg3);
12263         }
12264         return ret;
12265 #endif
12266 #ifdef TARGET_NR_mq_timedsend_time64
12267     case TARGET_NR_mq_timedsend_time64:
12268         {
12269             struct timespec ts;
12270 
12271             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12272             if (arg5 != 0) {
12273                 if (target_to_host_timespec64(&ts, arg5)) {
12274                     return -TARGET_EFAULT;
12275                 }
12276                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12277                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12278                     return -TARGET_EFAULT;
12279                 }
12280             } else {
12281                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12282             }
12283             unlock_user(p, arg2, arg3);
12284         }
12285         return ret;
12286 #endif
12287 
12288 #ifdef TARGET_NR_mq_timedreceive
12289     case TARGET_NR_mq_timedreceive:
12290         {
12291             struct timespec ts;
12292             unsigned int prio;
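            /* The priority of the received message is reported via arg4. */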
12293 
12294             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12295             if (arg5 != 0) {
12296                 if (target_to_host_timespec(&ts, arg5)) {
12297                     return -TARGET_EFAULT;
12298                 }
12299                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12300                                                      &prio, &ts));
12301                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12302                     return -TARGET_EFAULT;
12303                 }
12304             } else {
12305                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12306                                                      &prio, NULL));
12307             }
12308             unlock_user(p, arg2, arg3);
12309             if (arg4 != 0) {
12310                 put_user_u32(prio, arg4);
            }
12311         }
12312         return ret;
12313 #endif
12314 #ifdef TARGET_NR_mq_timedreceive_time64
12315     case TARGET_NR_mq_timedreceive_time64:
12316         {
12317             struct timespec ts;
12318             unsigned int prio;
12319 
12320             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12321             if (arg5 != 0) {
12322                 if (target_to_host_timespec64(&ts, arg5)) {
12323                     return -TARGET_EFAULT;
12324                 }
12325                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12326                                                      &prio, &ts));
12327                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12328                     return -TARGET_EFAULT;
12329                 }
12330             } else {
12331                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12332                                                      &prio, NULL));
12333             }
12334             unlock_user(p, arg2, arg3);
12335             if (arg4 != 0) {
12336                 put_user_u32(prio, arg4);
12337             }
12338         }
12339         return ret;
12340 #endif
12341 
12342     /* Not implemented for now... */
12343 /*     case TARGET_NR_mq_notify: */
12344 /*         break; */
12345 
12346     case TARGET_NR_mq_getsetattr:
12347         {
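            /*
             * One syscall covers both directions: if arg2 is set the new
             * attributes are applied (mq_setattr), otherwise arg3 requests
             * the current ones (mq_getattr); in both cases the resulting
             * attributes are copied back to arg3 when it is non-NULL.
             */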
12348             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12349             ret = 0;
12350             if (arg2 != 0) {
12351                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
12352                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12353                                            &posix_mq_attr_out));
12354             } else if (arg3 != 0) {
12355                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12356             }
12357             if (ret == 0 && arg3 != 0) {
12358                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12359             }
12360         }
12361         return ret;
12362 #endif
12363 
12364 #ifdef CONFIG_SPLICE
12365 #ifdef TARGET_NR_tee
12366     case TARGET_NR_tee:
12367         {
12368             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12369         }
12370         return ret;
12371 #endif
12372 #ifdef TARGET_NR_splice
12373     case TARGET_NR_splice:
12374         {
12375             loff_t loff_in, loff_out;
12376             loff_t *ploff_in = NULL, *ploff_out = NULL;
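            /*
             * arg2 and arg4 are optional pointers to 64-bit offsets that
             * the kernel updates, so they are read before the call and
             * written back afterwards.
             */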
12377             if (arg2) {
12378                 if (get_user_u64(loff_in, arg2)) {
12379                     return -TARGET_EFAULT;
12380                 }
12381                 ploff_in = &loff_in;
12382             }
12383             if (arg4) {
12384                 if (get_user_u64(loff_out, arg4)) {
12385                     return -TARGET_EFAULT;
12386                 }
12387                 ploff_out = &loff_out;
12388             }
12389             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12390             if (arg2) {
12391                 if (put_user_u64(loff_in, arg2)) {
12392                     return -TARGET_EFAULT;
12393                 }
12394             }
12395             if (arg4) {
12396                 if (put_user_u64(loff_out, arg4)) {
12397                     return -TARGET_EFAULT;
12398                 }
12399             }
12400         }
12401         return ret;
12402 #endif
12403 #ifdef TARGET_NR_vmsplice
12404     case TARGET_NR_vmsplice:
12405         {
12406             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12407             if (vec != NULL) {
12408                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12409                 unlock_iovec(vec, arg2, arg3, 0);
12410             } else {
12411                 ret = -host_to_target_errno(errno);
12412             }
12413         }
12414         return ret;
12415 #endif
12416 #endif /* CONFIG_SPLICE */
12417 #ifdef CONFIG_EVENTFD
12418 #if defined(TARGET_NR_eventfd)
12419     case TARGET_NR_eventfd:
12420         ret = get_errno(eventfd(arg1, 0));
12421         if (ret >= 0) {
12422             fd_trans_register(ret, &target_eventfd_trans);
12423         }
12424         return ret;
12425 #endif
12426 #if defined(TARGET_NR_eventfd2)
12427     case TARGET_NR_eventfd2:
12428     {
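        /*
         * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
         * and O_CLOEXEC, so only those two flags need remapping from the
         * target's O_* encoding to the host's.
         */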
12429         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12430         if (arg2 & TARGET_O_NONBLOCK) {
12431             host_flags |= O_NONBLOCK;
12432         }
12433         if (arg2 & TARGET_O_CLOEXEC) {
12434             host_flags |= O_CLOEXEC;
12435         }
12436         ret = get_errno(eventfd(arg1, host_flags));
12437         if (ret >= 0) {
12438             fd_trans_register(ret, &target_eventfd_trans);
12439         }
12440         return ret;
12441     }
12442 #endif
12443 #endif /* CONFIG_EVENTFD  */
12444 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12445     case TARGET_NR_fallocate:
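        /*
         * On 32-bit ABIs the 64-bit offset and length are split across two
         * registers each and reassembled here with target_offset64().
         */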
12446 #if TARGET_ABI_BITS == 32
12447         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12448                                   target_offset64(arg5, arg6)));
12449 #else
12450         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12451 #endif
12452         return ret;
12453 #endif
12454 #if defined(CONFIG_SYNC_FILE_RANGE)
12455 #if defined(TARGET_NR_sync_file_range)
12456     case TARGET_NR_sync_file_range:
12457 #if TARGET_ABI_BITS == 32
12458 #if defined(TARGET_MIPS)
12459         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12460                                         target_offset64(arg5, arg6), arg7));
12461 #else
12462         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12463                                         target_offset64(arg4, arg5), arg6));
12464 #endif /* !TARGET_MIPS */
12465 #else
12466         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12467 #endif
12468         return ret;
12469 #endif
12470 #if defined(TARGET_NR_sync_file_range2) || \
12471     defined(TARGET_NR_arm_sync_file_range)
12472 #if defined(TARGET_NR_sync_file_range2)
12473     case TARGET_NR_sync_file_range2:
12474 #endif
12475 #if defined(TARGET_NR_arm_sync_file_range)
12476     case TARGET_NR_arm_sync_file_range:
12477 #endif
12478         /* This is like sync_file_range but the arguments are reordered */
12479 #if TARGET_ABI_BITS == 32
12480         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12481                                         target_offset64(arg5, arg6), arg2));
12482 #else
12483         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12484 #endif
12485         return ret;
12486 #endif
12487 #endif
12488 #if defined(TARGET_NR_signalfd4)
12489     case TARGET_NR_signalfd4:
12490         return do_signalfd4(arg1, arg2, arg4);
12491 #endif
12492 #if defined(TARGET_NR_signalfd)
12493     case TARGET_NR_signalfd:
12494         return do_signalfd4(arg1, arg2, 0);
12495 #endif
12496 #if defined(CONFIG_EPOLL)
12497 #if defined(TARGET_NR_epoll_create)
12498     case TARGET_NR_epoll_create:
12499         return get_errno(epoll_create(arg1));
12500 #endif
12501 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12502     case TARGET_NR_epoll_create1:
12503         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12504 #endif
12505 #if defined(TARGET_NR_epoll_ctl)
12506     case TARGET_NR_epoll_ctl:
12507     {
12508         struct epoll_event ep;
12509         struct epoll_event *epp = NULL;
12510         if (arg4) {
12511             if (arg2 != EPOLL_CTL_DEL) {
12512                 struct target_epoll_event *target_ep;
12513                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12514                     return -TARGET_EFAULT;
12515                 }
12516                 ep.events = tswap32(target_ep->events);
12517                 /*
12518                  * The epoll_data_t union is just opaque data to the kernel,
12519                  * so we transfer all 64 bits across and need not worry what
12520                  * actual data type it is.
12521                  */
12522                 ep.data.u64 = tswap64(target_ep->data.u64);
12523                 unlock_user_struct(target_ep, arg4, 0);
12524             }
12525             /*
12526              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12527              * non-null pointer, even though this argument is ignored.
12528              */
12530             epp = &ep;
12531         }
12532         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12533     }
12534 #endif
12535 
12536 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12537 #if defined(TARGET_NR_epoll_wait)
12538     case TARGET_NR_epoll_wait:
12539 #endif
12540 #if defined(TARGET_NR_epoll_pwait)
12541     case TARGET_NR_epoll_pwait:
12542 #endif
12543     {
12544         struct target_epoll_event *target_ep;
12545         struct epoll_event *ep;
12546         int epfd = arg1;
12547         int maxevents = arg3;
12548         int timeout = arg4;
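        /*
         * Events are collected into a host-side array and converted to the
         * target's epoll_event layout (byte order and packing may differ)
         * only after the wait returns.
         */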
12549 
12550         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12551             return -TARGET_EINVAL;
12552         }
12553 
12554         target_ep = lock_user(VERIFY_WRITE, arg2,
12555                               maxevents * sizeof(struct target_epoll_event), 1);
12556         if (!target_ep) {
12557             return -TARGET_EFAULT;
12558         }
12559 
12560         ep = g_try_new(struct epoll_event, maxevents);
12561         if (!ep) {
12562             unlock_user(target_ep, arg2, 0);
12563             return -TARGET_ENOMEM;
12564         }
12565 
12566         switch (num) {
12567 #if defined(TARGET_NR_epoll_pwait)
12568         case TARGET_NR_epoll_pwait:
12569         {
12570             target_sigset_t *target_set;
12571             sigset_t _set, *set = &_set;
12572 
12573             if (arg5) {
12574                 if (arg6 != sizeof(target_sigset_t)) {
12575                     ret = -TARGET_EINVAL;
12576                     break;
12577                 }
12578 
12579                 target_set = lock_user(VERIFY_READ, arg5,
12580                                        sizeof(target_sigset_t), 1);
12581                 if (!target_set) {
12582                     ret = -TARGET_EFAULT;
12583                     break;
12584                 }
12585                 target_to_host_sigset(set, target_set);
12586                 unlock_user(target_set, arg5, 0);
12587             } else {
12588                 set = NULL;
12589             }
12590 
12591             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12592                                              set, SIGSET_T_SIZE));
12593             break;
12594         }
12595 #endif
12596 #if defined(TARGET_NR_epoll_wait)
12597         case TARGET_NR_epoll_wait:
12598             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12599                                              NULL, 0));
12600             break;
12601 #endif
12602         default:
12603             ret = -TARGET_ENOSYS;
12604         }
12605         if (!is_error(ret)) {
12606             int i;
12607             for (i = 0; i < ret; i++) {
12608                 target_ep[i].events = tswap32(ep[i].events);
12609                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12610             }
12611             unlock_user(target_ep, arg2,
12612                         ret * sizeof(struct target_epoll_event));
12613         } else {
12614             unlock_user(target_ep, arg2, 0);
12615         }
12616         g_free(ep);
12617         return ret;
12618     }
12619 #endif
12620 #endif
12621 #ifdef TARGET_NR_prlimit64
12622     case TARGET_NR_prlimit64:
12623     {
12624         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12625         struct target_rlimit64 *target_rnew, *target_rold;
12626         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12627         int resource = target_to_host_resource(arg2);
12628 
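        /*
         * As with setrlimit, new limits for RLIMIT_AS, RLIMIT_DATA and
         * RLIMIT_STACK are deliberately not forwarded to the host: they
         * would constrain QEMU's own memory use as well as the guest's.
         */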
12629         if (arg3 && (resource != RLIMIT_AS &&
12630                      resource != RLIMIT_DATA &&
12631                      resource != RLIMIT_STACK)) {
12632             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12633                 return -TARGET_EFAULT;
12634             }
12635             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12636             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12637             unlock_user_struct(target_rnew, arg3, 0);
12638             rnewp = &rnew;
12639         }
12640 
12641         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12642         if (!is_error(ret) && arg4) {
12643             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12644                 return -TARGET_EFAULT;
12645             }
12646             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12647             target_rold->rlim_max = tswap64(rold.rlim_max);
12648             unlock_user_struct(target_rold, arg4, 1);
12649         }
12650         return ret;
12651     }
12652 #endif
12653 #ifdef TARGET_NR_gethostname
12654     case TARGET_NR_gethostname:
12655     {
12656         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12657         if (name) {
12658             ret = get_errno(gethostname(name, arg2));
12659             unlock_user(name, arg1, arg2);
12660         } else {
12661             ret = -TARGET_EFAULT;
12662         }
12663         return ret;
12664     }
12665 #endif
12666 #ifdef TARGET_NR_atomic_cmpxchg_32
12667     case TARGET_NR_atomic_cmpxchg_32:
12668     {
12669         /* should use start_exclusive from main.c */
12670         abi_ulong mem_value;
12671         if (get_user_u32(mem_value, arg6)) {
12672             target_siginfo_t info;
12673             info.si_signo = SIGSEGV;
12674             info.si_errno = 0;
12675             info.si_code = TARGET_SEGV_MAPERR;
12676             info._sifields._sigfault._addr = arg6;
12677             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12678                          QEMU_SI_FAULT, &info);
12679             ret = 0xdeadbeef;
12681         }
12682         if (mem_value == arg2) {
12683             put_user_u32(arg1, arg6);
        }
12684         return mem_value;
12685     }
12686 #endif
12687 #ifdef TARGET_NR_atomic_barrier
12688     case TARGET_NR_atomic_barrier:
12689         /* Like the kernel implementation and the QEMU Arm barrier,
12690            treat this as a no-op. */
12691         return 0;
12692 #endif
12693 
12694 #ifdef TARGET_NR_timer_create
12695     case TARGET_NR_timer_create:
12696     {
12697         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12698 
12699         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12700 
12701         int clkid = arg1;
12702         int timer_index = next_free_host_timer();
12703 
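        /*
         * Host timers are tracked in g_posix_timers[]; the value handed
         * back to the guest is the table index tagged with TIMER_MAGIC,
         * which get_timer_id() later validates and strips again.
         */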
12704         if (timer_index < 0) {
12705             ret = -TARGET_EAGAIN;
12706         } else {
12707             timer_t *phtimer = g_posix_timers + timer_index;
12708 
12709             if (arg2) {
12710                 phost_sevp = &host_sevp;
12711                 ret = target_to_host_sigevent(phost_sevp, arg2);
12712                 if (ret != 0) {
12713                     return ret;
12714                 }
12715             }
12716 
12717             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12718             if (ret) {
12719                 phtimer = NULL;
12720             } else {
12721                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12722                     return -TARGET_EFAULT;
12723                 }
12724             }
12725         }
12726         return ret;
12727     }
12728 #endif
12729 
12730 #ifdef TARGET_NR_timer_settime
12731     case TARGET_NR_timer_settime:
12732     {
12733         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12734          * struct itimerspec * old_value */
12735         target_timer_t timerid = get_timer_id(arg1);
12736 
12737         if (timerid < 0) {
12738             ret = timerid;
12739         } else if (arg3 == 0) {
12740             ret = -TARGET_EINVAL;
12741         } else {
12742             timer_t htimer = g_posix_timers[timerid];
12743             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12744 
12745             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12746                 return -TARGET_EFAULT;
12747             }
12748             ret = get_errno(
12749                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12750             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12751                 return -TARGET_EFAULT;
12752             }
12753         }
12754         return ret;
12755     }
12756 #endif
12757 
12758 #ifdef TARGET_NR_timer_settime64
12759     case TARGET_NR_timer_settime64:
12760     {
12761         target_timer_t timerid = get_timer_id(arg1);
12762 
12763         if (timerid < 0) {
12764             ret = timerid;
12765         } else if (arg3 == 0) {
12766             ret = -TARGET_EINVAL;
12767         } else {
12768             timer_t htimer = g_posix_timers[timerid];
12769             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12770 
12771             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12772                 return -TARGET_EFAULT;
12773             }
12774             ret = get_errno(
12775                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12776             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12777                 return -TARGET_EFAULT;
12778             }
12779         }
12780         return ret;
12781     }
12782 #endif
12783 
12784 #ifdef TARGET_NR_timer_gettime
12785     case TARGET_NR_timer_gettime:
12786     {
12787         /* args: timer_t timerid, struct itimerspec *curr_value */
12788         target_timer_t timerid = get_timer_id(arg1);
12789 
12790         if (timerid < 0) {
12791             ret = timerid;
12792         } else if (!arg2) {
12793             ret = -TARGET_EFAULT;
12794         } else {
12795             timer_t htimer = g_posix_timers[timerid];
12796             struct itimerspec hspec;
12797             ret = get_errno(timer_gettime(htimer, &hspec));
12798 
12799             if (host_to_target_itimerspec(arg2, &hspec)) {
12800                 ret = -TARGET_EFAULT;
12801             }
12802         }
12803         return ret;
12804     }
12805 #endif
12806 
12807 #ifdef TARGET_NR_timer_gettime64
12808     case TARGET_NR_timer_gettime64:
12809     {
12810         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12811         target_timer_t timerid = get_timer_id(arg1);
12812 
12813         if (timerid < 0) {
12814             ret = timerid;
12815         } else if (!arg2) {
12816             ret = -TARGET_EFAULT;
12817         } else {
12818             timer_t htimer = g_posix_timers[timerid];
12819             struct itimerspec hspec;
12820             ret = get_errno(timer_gettime(htimer, &hspec));
12821 
12822             if (host_to_target_itimerspec64(arg2, &hspec)) {
12823                 ret = -TARGET_EFAULT;
12824             }
12825         }
12826         return ret;
12827     }
12828 #endif
12829 
12830 #ifdef TARGET_NR_timer_getoverrun
12831     case TARGET_NR_timer_getoverrun:
12832     {
12833         /* args: timer_t timerid */
12834         target_timer_t timerid = get_timer_id(arg1);
12835 
12836         if (timerid < 0) {
12837             ret = timerid;
12838         } else {
12839             timer_t htimer = g_posix_timers[timerid];
12840             ret = get_errno(timer_getoverrun(htimer));
12841         }
12842         return ret;
12843     }
12844 #endif
12845 
12846 #ifdef TARGET_NR_timer_delete
12847     case TARGET_NR_timer_delete:
12848     {
12849         /* args: timer_t timerid */
12850         target_timer_t timerid = get_timer_id(arg1);
12851 
12852         if (timerid < 0) {
12853             ret = timerid;
12854         } else {
12855             timer_t htimer = g_posix_timers[timerid];
12856             ret = get_errno(timer_delete(htimer));
12857             g_posix_timers[timerid] = 0;
12858         }
12859         return ret;
12860     }
12861 #endif
12862 
12863 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12864     case TARGET_NR_timerfd_create:
12865         return get_errno(timerfd_create(arg1,
12866                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12867 #endif
12868 
12869 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12870     case TARGET_NR_timerfd_gettime:
12871         {
12872             struct itimerspec its_curr;
12873 
12874             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12875 
12876             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12877                 return -TARGET_EFAULT;
12878             }
12879         }
12880         return ret;
12881 #endif
12882 
12883 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12884     case TARGET_NR_timerfd_gettime64:
12885         {
12886             struct itimerspec its_curr;
12887 
12888             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12889 
12890             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12891                 return -TARGET_EFAULT;
12892             }
12893         }
12894         return ret;
12895 #endif
12896 
12897 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12898     case TARGET_NR_timerfd_settime:
12899         {
12900             struct itimerspec its_new, its_old, *p_new;
12901 
12902             if (arg3) {
12903                 if (target_to_host_itimerspec(&its_new, arg3)) {
12904                     return -TARGET_EFAULT;
12905                 }
12906                 p_new = &its_new;
12907             } else {
12908                 p_new = NULL;
12909             }
12910 
12911             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12912 
12913             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12914                 return -TARGET_EFAULT;
12915             }
12916         }
12917         return ret;
12918 #endif
12919 
12920 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12921     case TARGET_NR_timerfd_settime64:
12922         {
12923             struct itimerspec its_new, its_old, *p_new;
12924 
12925             if (arg3) {
12926                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12927                     return -TARGET_EFAULT;
12928                 }
12929                 p_new = &its_new;
12930             } else {
12931                 p_new = NULL;
12932             }
12933 
12934             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12935 
12936             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12937                 return -TARGET_EFAULT;
12938             }
12939         }
12940         return ret;
12941 #endif
12942 
12943 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12944     case TARGET_NR_ioprio_get:
12945         return get_errno(ioprio_get(arg1, arg2));
12946 #endif
12947 
12948 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12949     case TARGET_NR_ioprio_set:
12950         return get_errno(ioprio_set(arg1, arg2, arg3));
12951 #endif
12952 
12953 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12954     case TARGET_NR_setns:
12955         return get_errno(setns(arg1, arg2));
12956 #endif
12957 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12958     case TARGET_NR_unshare:
12959         return get_errno(unshare(arg1));
12960 #endif
12961 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12962     case TARGET_NR_kcmp:
12963         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12964 #endif
12965 #ifdef TARGET_NR_swapcontext
12966     case TARGET_NR_swapcontext:
12967         /* PowerPC specific.  */
12968         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12969 #endif
12970 #ifdef TARGET_NR_memfd_create
12971     case TARGET_NR_memfd_create:
12972         p = lock_user_string(arg1);
12973         if (!p) {
12974             return -TARGET_EFAULT;
12975         }
12976         ret = get_errno(memfd_create(p, arg2));
12977         fd_trans_unregister(ret);
12978         unlock_user(p, arg1, 0);
12979         return ret;
12980 #endif
12981 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12982     case TARGET_NR_membarrier:
12983         return get_errno(membarrier(arg1, arg2));
12984 #endif
12985 
12986 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
12987     case TARGET_NR_copy_file_range:
12988         {
12989             loff_t inoff, outoff;
12990             loff_t *pinoff = NULL, *poutoff = NULL;
12991 
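            /*
             * As for splice, the optional in/out offsets are only written
             * back when the call actually transferred some bytes.
             */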
12992             if (arg2) {
12993                 if (get_user_u64(inoff, arg2)) {
12994                     return -TARGET_EFAULT;
12995                 }
12996                 pinoff = &inoff;
12997             }
12998             if (arg4) {
12999                 if (get_user_u64(outoff, arg4)) {
13000                     return -TARGET_EFAULT;
13001                 }
13002                 poutoff = &outoff;
13003             }
13004             /* Do not sign-extend the count parameter. */
13005             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13006                                                  (abi_ulong)arg5, arg6));
13007             if (!is_error(ret) && ret > 0) {
13008                 if (arg2) {
13009                     if (put_user_u64(inoff, arg2)) {
13010                         return -TARGET_EFAULT;
13011                     }
13012                 }
13013                 if (arg4) {
13014                     if (put_user_u64(outoff, arg4)) {
13015                         return -TARGET_EFAULT;
13016                     }
13017                 }
13018             }
13019         }
13020         return ret;
13021 #endif
13022 
13023 #if defined(TARGET_NR_pivot_root)
13024     case TARGET_NR_pivot_root:
13025         {
13026             void *p2;
13027             p = lock_user_string(arg1); /* new_root */
13028             p2 = lock_user_string(arg2); /* put_old */
13029             if (!p || !p2) {
13030                 ret = -TARGET_EFAULT;
13031             } else {
13032                 ret = get_errno(pivot_root(p, p2));
13033             }
13034             unlock_user(p2, arg2, 0);
13035             unlock_user(p, arg1, 0);
13036         }
13037         return ret;
13038 #endif
13039 
13040     default:
13041         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13042         return -TARGET_ENOSYS;
13043     }
13044     return ret;
13045 }
13046 
13047 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13048                     abi_long arg2, abi_long arg3, abi_long arg4,
13049                     abi_long arg5, abi_long arg6, abi_long arg7,
13050                     abi_long arg8)
13051 {
13052     CPUState *cpu = env_cpu(cpu_env);
13053     abi_long ret;
13054 
13055 #ifdef DEBUG_ERESTARTSYS
13056     /* Debug-only code for exercising the syscall-restart code paths
13057      * in the per-architecture cpu main loops: restart every syscall
13058      * the guest makes once before letting it through.
13059      */
13060     {
13061         static bool flag;
13062         flag = !flag;
13063         if (flag) {
13064             return -QEMU_ERESTARTSYS;
13065         }
13066     }
13067 #endif
13068 
13069     record_syscall_start(cpu, num, arg1,
13070                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13071 
13072     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13073         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13074     }
13075 
13076     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13077                       arg5, arg6, arg7, arg8);
13078 
13079     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13080         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13081                           arg3, arg4, arg5, arg6);
13082     }
13083 
13084     record_syscall_return(cpu, num, ret);
13085     return ret;
13086 }
13087