xref: /openbmc/qemu/linux-user/syscall.c (revision 6e8dcacd)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
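
/*
 * Illustrative note, not used by the code: with a typical glibc,
 * pthread_create() issues clone() with a flag set along the lines of
 *
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 *
 * which contains all of CLONE_THREAD_FLAGS plus only "optional" bits,
 * so do_fork() treats it as thread creation, whereas a plain fork()
 * passes just SIGCHLD (covered by CSIGNAL) and so contains none of
 * them.  The exact flag set is a libc implementation detail and may
 * vary between versions.
 */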
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
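
/*
 * Illustration of the macros above: an invocation such as
 *
 *     _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 *
 * (used further down in this file) expands to a host wrapper
 * equivalent to
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * where __NR_sys_getcwd1 is #defined a few lines below to the host's
 * __NR_getcwd.
 */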
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_inotify_init __NR_inotify_init
276 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
277 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
278 #define __NR_sys_statx __NR_statx
279 
280 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
281 #define __NR__llseek __NR_lseek
282 #endif
283 
284 /* Newer kernel ports have llseek() instead of _llseek() */
285 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
286 #define TARGET_NR__llseek TARGET_NR_llseek
287 #endif
288 
289 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
290 #ifndef TARGET_O_NONBLOCK_MASK
291 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
292 #endif
293 
294 #define __NR_sys_gettid __NR_gettid
295 _syscall0(int, sys_gettid)
296 
297 /* For a 64-bit guest on a 32-bit host we must emulate getdents
298  * using getdents64, because otherwise the host might hand us back
299  * more dirent records than we can fit into the guest buffer after
300  * structure format conversion.
301  * Otherwise, if the host provides getdents, we emulate getdents with it.
302  */
303 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
304 #define EMULATE_GETDENTS_WITH_GETDENTS
305 #endif
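
/*
 * Worked example of the problem described above (field sizes are
 * indicative only): a 32-bit host's struct linux_dirent carries 32-bit
 * d_ino and d_off fields, while a 64-bit guest expects 64-bit ones, so
 * each record grows during conversion and a full host buffer may no
 * longer fit into the guest buffer.  Host getdents64 records already
 * carry 64-bit fields, so converted records do not get bigger than
 * their host counterparts and the overflow cannot happen.
 */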
306 
307 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
308 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
309 #endif
310 #if (defined(TARGET_NR_getdents) && \
311       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
312     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
313 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
314 #endif
315 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
316 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
317           loff_t *, res, uint, wh);
318 #endif
319 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
320 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
321           siginfo_t *, uinfo)
322 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
323 #ifdef __NR_exit_group
324 _syscall1(int,exit_group,int,error_code)
325 #endif
326 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
327 _syscall1(int,set_tid_address,int *,tidptr)
328 #endif
329 #if defined(__NR_futex)
330 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #if defined(__NR_futex_time64)
334 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
335           const struct timespec *,timeout,int *,uaddr2,int,val3)
336 #endif
337 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
338 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
341 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
342           unsigned long *, user_mask_ptr);
343 #define __NR_sys_getcpu __NR_getcpu
344 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
345 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
346           void *, arg);
347 _syscall2(int, capget, struct __user_cap_header_struct *, header,
348           struct __user_cap_data_struct *, data);
349 _syscall2(int, capset, struct __user_cap_header_struct *, header,
350           struct __user_cap_data_struct *, data);
351 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
352 _syscall2(int, ioprio_get, int, which, int, who)
353 #endif
354 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
355 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
356 #endif
357 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
358 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
359 #endif
360 
361 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
362 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
363           unsigned long, idx1, unsigned long, idx2)
364 #endif
365 
366 /*
367  * It is assumed that struct statx is architecture independent.
368  */
369 #if defined(TARGET_NR_statx) && defined(__NR_statx)
370 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
371           unsigned int, mask, struct target_statx *, statxbuf)
372 #endif
373 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
374 _syscall2(int, membarrier, int, cmd, int, flags)
375 #endif
376 
377 static const bitmask_transtbl fcntl_flags_tbl[] = {
378   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
379   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
380   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
381   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
382   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
383   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
384   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
385   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
386   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
387   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
388   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
389   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
390   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
391 #if defined(O_DIRECT)
392   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
393 #endif
394 #if defined(O_NOATIME)
395   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
396 #endif
397 #if defined(O_CLOEXEC)
398   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
399 #endif
400 #if defined(O_PATH)
401   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
402 #endif
403 #if defined(O_TMPFILE)
404   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
405 #endif
406   /* Don't terminate the list prematurely on 64-bit host+guest.  */
407 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
408   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
409 #endif
410   { 0, 0, 0, 0 }
411 };
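
/*
 * Sketch of how this table is consumed (the translation helpers are
 * declared elsewhere in linux-user, this is not new API): each entry
 * pairs a target mask/value with the matching host mask/value, so
 * something like
 *
 *     int host_flags = target_to_host_bitmask(target_flags,
 *                                             fcntl_flags_tbl);
 *
 * rewrites e.g. TARGET_O_NONBLOCK into the host's O_NONBLOCK even when
 * the two architectures give the flag different numeric values, and
 * host_to_target_bitmask() performs the reverse mapping for values we
 * return to the guest.
 */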
412 
413 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
414 
415 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
416 #if defined(__NR_utimensat)
417 #define __NR_sys_utimensat __NR_utimensat
418 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
419           const struct timespec *,tsp,int,flags)
420 #else
421 static int sys_utimensat(int dirfd, const char *pathname,
422                          const struct timespec times[2], int flags)
423 {
424     errno = ENOSYS;
425     return -1;
426 }
427 #endif
428 #endif /* TARGET_NR_utimensat */
429 
430 #ifdef TARGET_NR_renameat2
431 #if defined(__NR_renameat2)
432 #define __NR_sys_renameat2 __NR_renameat2
433 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
434           const char *, new, unsigned int, flags)
435 #else
436 static int sys_renameat2(int oldfd, const char *old,
437                          int newfd, const char *new, int flags)
438 {
439     if (flags == 0) {
440         return renameat(oldfd, old, newfd, new);
441     }
442     errno = ENOSYS;
443     return -1;
444 }
445 #endif
446 #endif /* TARGET_NR_renameat2 */
447 
448 #ifdef CONFIG_INOTIFY
449 #include <sys/inotify.h>
450 
451 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
452 static int sys_inotify_init(void)
453 {
454     return inotify_init();
455 }
456 #endif
457 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
458 static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
459 {
460     return inotify_add_watch(fd, pathname, mask);
461 }
462 #endif
463 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
464 static int sys_inotify_rm_watch(int fd, int32_t wd)
465 {
466     return inotify_rm_watch(fd, wd);
467 }
468 #endif
469 #ifdef CONFIG_INOTIFY1
470 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
471 static int sys_inotify_init1(int flags)
472 {
473     return inotify_init1(flags);
474 }
475 #endif
476 #endif
477 #else
478 /* Userspace can usually survive at runtime without inotify support */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not match the one used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, } ;
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k ;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
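
/*
 * Usage sketch for the slot allocator above; this describes the
 * expectation on the callers further down in this file rather than
 * adding any behaviour: the timer_create path claims a slot with
 * next_free_host_timer(), stores the real host timer_t into
 * g_posix_timers[slot] and returns an id derived from the slot index
 * to the guest; the timer_delete path is then expected to release the
 * slot by storing 0 back into g_posix_timers[slot].
 */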
518 
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
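
/*
 * The two functions above use the usual X-macro trick: errnos.c.inc is
 * just a list of E(...) invocations, so for an entry such as E(ENOSYS)
 * (assuming ENOSYS is in that list) the first inclusion expands to
 *
 *     case ENOSYS: return TARGET_ENOSYS;
 *
 * and the second inclusion, with E() redefined the other way round,
 * expands to
 *
 *     case TARGET_ENOSYS: return ENOSYS;
 */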
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1)
544         return -host_to_target_errno(errno);
545     else
546         return ret;
547 }
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 #define safe_syscall0(type, name) \
562 static type safe_##name(void) \
563 { \
564     return safe_syscall(__NR_##name); \
565 }
566 
567 #define safe_syscall1(type, name, type1, arg1) \
568 static type safe_##name(type1 arg1) \
569 { \
570     return safe_syscall(__NR_##name, arg1); \
571 }
572 
573 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
574 static type safe_##name(type1 arg1, type2 arg2) \
575 { \
576     return safe_syscall(__NR_##name, arg1, arg2); \
577 }
578 
579 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
580 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
581 { \
582     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
583 }
584 
585 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
586     type4, arg4) \
587 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
588 { \
589     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
590 }
591 
592 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
593     type4, arg4, type5, arg5) \
594 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
595     type5 arg5) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
598 }
599 
600 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
601     type4, arg4, type5, arg5, type6, arg6) \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
603     type5 arg5, type6 arg6) \
604 { \
605     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
606 }
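
/*
 * For illustration, the safe_syscall3() use just below,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * defines a wrapper equivalent to
 *
 *     static ssize_t safe_read(int fd, void *buff, size_t count)
 *     {
 *         return safe_syscall(__NR_read, fd, buff, count);
 *     }
 *
 * safe_syscall() (see user/safe-syscall.h) behaves like a raw syscall,
 * except that if a guest signal arrives before the host syscall has
 * really started it fails with errno set to QEMU_ERESTARTSYS rather
 * than blocking, which is what allows the guest syscall to be
 * restarted correctly.
 */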
607 
608 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
609 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
610 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
611               int, flags, mode_t, mode)
612 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
613 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
614               struct rusage *, rusage)
615 #endif
616 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
617               int, options, struct rusage *, rusage)
618 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
619 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
620     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
621 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
622               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
623 #endif
624 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
625 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
626               struct timespec *, tsp, const sigset_t *, sigmask,
627               size_t, sigsetsize)
628 #endif
629 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
630               int, maxevents, int, timeout, const sigset_t *, sigmask,
631               size_t, sigsetsize)
632 #if defined(__NR_futex)
633 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
634               const struct timespec *,timeout,int *,uaddr2,int,val3)
635 #endif
636 #if defined(__NR_futex_time64)
637 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
638               const struct timespec *,timeout,int *,uaddr2,int,val3)
639 #endif
640 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
641 safe_syscall2(int, kill, pid_t, pid, int, sig)
642 safe_syscall2(int, tkill, int, tid, int, sig)
643 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
644 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
645 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
646 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
647               unsigned long, pos_l, unsigned long, pos_h)
648 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
649               unsigned long, pos_l, unsigned long, pos_h)
650 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
651               socklen_t, addrlen)
652 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
653               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
654 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
655               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
656 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
657 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
658 safe_syscall2(int, flock, int, fd, int, operation)
659 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
660 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
661               const struct timespec *, uts, size_t, sigsetsize)
662 #endif
663 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
664               int, flags)
665 #if defined(TARGET_NR_nanosleep)
666 safe_syscall2(int, nanosleep, const struct timespec *, req,
667               struct timespec *, rem)
668 #endif
669 #if defined(TARGET_NR_clock_nanosleep) || \
670     defined(TARGET_NR_clock_nanosleep_time64)
671 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
672               const struct timespec *, req, struct timespec *, rem)
673 #endif
674 #ifdef __NR_ipc
675 #ifdef __s390x__
676 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr)
678 #else
679 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
680               void *, ptr, long, fifth)
681 #endif
682 #endif
683 #ifdef __NR_msgsnd
684 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
685               int, flags)
686 #endif
687 #ifdef __NR_msgrcv
688 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
689               long, msgtype, int, flags)
690 #endif
691 #ifdef __NR_semtimedop
692 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
693               unsigned, nsops, const struct timespec *, timeout)
694 #endif
695 #if defined(TARGET_NR_mq_timedsend) || \
696     defined(TARGET_NR_mq_timedsend_time64)
697 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
698               size_t, len, unsigned, prio, const struct timespec *, timeout)
699 #endif
700 #if defined(TARGET_NR_mq_timedreceive) || \
701     defined(TARGET_NR_mq_timedreceive_time64)
702 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
703               size_t, len, unsigned *, prio, const struct timespec *, timeout)
704 #endif
705 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
706 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
707               int, outfd, loff_t *, poutoff, size_t, length,
708               unsigned int, flags)
709 #endif
710 
711 /* We do ioctl like this rather than via safe_syscall3 to preserve the
712  * "third argument might be integer or pointer or not present" behaviour of
713  * the libc function.
714  */
715 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
716 /* Similarly for fcntl. Note that callers must always:
717  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
718  *  use the flock64 struct rather than unsuffixed flock
719  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
720  */
721 #ifdef __NR_fcntl64
722 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
723 #else
724 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
725 #endif
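
/*
 * Minimal usage sketch for safe_fcntl(), matching the comment above
 * (the variables are illustrative):
 *
 *     struct flock64 fl64;
 *     ...
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * On hosts without __NR_fcntl64 (typically 64-bit hosts) F_GETLK64 and
 * struct flock64 are the same as their unsuffixed variants, so the one
 * spelling works for both cases with a 64-bit file offset.
 */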
726 
727 static inline int host_to_target_sock_type(int host_type)
728 {
729     int target_type;
730 
731     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
732     case SOCK_DGRAM:
733         target_type = TARGET_SOCK_DGRAM;
734         break;
735     case SOCK_STREAM:
736         target_type = TARGET_SOCK_STREAM;
737         break;
738     default:
739         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
740         break;
741     }
742 
743 #if defined(SOCK_CLOEXEC)
744     if (host_type & SOCK_CLOEXEC) {
745         target_type |= TARGET_SOCK_CLOEXEC;
746     }
747 #endif
748 
749 #if defined(SOCK_NONBLOCK)
750     if (host_type & SOCK_NONBLOCK) {
751         target_type |= TARGET_SOCK_NONBLOCK;
752     }
753 #endif
754 
755     return target_type;
756 }
757 
758 static abi_ulong target_brk;
759 static abi_ulong target_original_brk;
760 static abi_ulong brk_page;
761 
762 void target_set_brk(abi_ulong new_brk)
763 {
764     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
765     brk_page = HOST_PAGE_ALIGN(target_brk);
766 }
767 
768 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
769 #define DEBUGF_BRK(message, args...)
770 
771 /* do_brk() must return target values and target errnos. */
772 abi_long do_brk(abi_ulong new_brk)
773 {
774     abi_long mapped_addr;
775     abi_ulong new_alloc_size;
776 
777     /* brk pointers are always untagged */
778 
779     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
780 
781     if (!new_brk) {
782         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
783         return target_brk;
784     }
785     if (new_brk < target_original_brk) {
786         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
787                    target_brk);
788         return target_brk;
789     }
790 
791     /* If the new brk is less than the highest page reserved to the
792      * target heap allocation, set it and we're almost done...  */
793     if (new_brk <= brk_page) {
794         /* Heap contents are initialized to zero, as for anonymous
795          * mapped pages.  */
796         if (new_brk > target_brk) {
797             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
798         }
799         target_brk = new_brk;
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
801         return target_brk;
802     }
803 
804     /* We need to allocate more memory after the brk... Note that
805      * we don't use MAP_FIXED because that will map over the top of
806      * any existing mapping (like the one with the host libc or qemu
807      * itself); instead we treat "mapped but at wrong address" as
808      * a failure and unmap again.
809      */
810     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
811     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
812                                         PROT_READ|PROT_WRITE,
813                                         MAP_ANON|MAP_PRIVATE, 0, 0));
814 
815     if (mapped_addr == brk_page) {
816         /* Heap contents are initialized to zero, as for anonymous
817          * mapped pages.  Technically the new pages are already
818          * initialized to zero since they *are* anonymous mapped
819          * pages, however we have to take care with the contents that
820          * come from the remaining part of the previous page: it may
821          * contain garbage data from previous heap usage (the heap was
822          * grown and then shrunk).  */
823         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
824 
825         target_brk = new_brk;
826         brk_page = HOST_PAGE_ALIGN(target_brk);
827         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
828             target_brk);
829         return target_brk;
830     } else if (mapped_addr != -1) {
831         /* Mapped but at wrong address, meaning there wasn't actually
832          * enough space for this brk.
833          */
834         target_munmap(mapped_addr, new_alloc_size);
835         mapped_addr = -1;
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
837     }
838     else {
839         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
840     }
841 
842 #if defined(TARGET_ALPHA)
843     /* We (partially) emulate OSF/1 on Alpha, which requires we
844        return a proper errno, not an unchanged brk value.  */
845     return -TARGET_ENOMEM;
846 #endif
847     /* For everything else, return the previous break. */
848     return target_brk;
849 }
850 
851 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
852     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
853 static inline abi_long copy_from_user_fdset(fd_set *fds,
854                                             abi_ulong target_fds_addr,
855                                             int n)
856 {
857     int i, nw, j, k;
858     abi_ulong b, *target_fds;
859 
860     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
861     if (!(target_fds = lock_user(VERIFY_READ,
862                                  target_fds_addr,
863                                  sizeof(abi_ulong) * nw,
864                                  1)))
865         return -TARGET_EFAULT;
866 
867     FD_ZERO(fds);
868     k = 0;
869     for (i = 0; i < nw; i++) {
870         /* grab the abi_ulong */
871         __get_user(b, &target_fds[i]);
872         for (j = 0; j < TARGET_ABI_BITS; j++) {
873             /* check the bit inside the abi_ulong */
874             if ((b >> j) & 1)
875                 FD_SET(k, fds);
876             k++;
877         }
878     }
879 
880     unlock_user(target_fds, target_fds_addr, 0);
881 
882     return 0;
883 }
884 
885 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
886                                                  abi_ulong target_fds_addr,
887                                                  int n)
888 {
889     if (target_fds_addr) {
890         if (copy_from_user_fdset(fds, target_fds_addr, n))
891             return -TARGET_EFAULT;
892         *fds_ptr = fds;
893     } else {
894         *fds_ptr = NULL;
895     }
896     return 0;
897 }
898 
899 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
900                                           const fd_set *fds,
901                                           int n)
902 {
903     int i, nw, j, k;
904     abi_long v;
905     abi_ulong *target_fds;
906 
907     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
908     if (!(target_fds = lock_user(VERIFY_WRITE,
909                                  target_fds_addr,
910                                  sizeof(abi_ulong) * nw,
911                                  0)))
912         return -TARGET_EFAULT;
913 
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         v = 0;
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
919             k++;
920         }
921         __put_user(v, &target_fds[i]);
922     }
923 
924     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
925 
926     return 0;
927 }
928 #endif
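
/*
 * Worked example of the repacking above (the numbers are illustrative):
 * for n = 70 descriptors and TARGET_ABI_BITS = 32 we copy
 * nw = DIV_ROUND_UP(70, 32) = 3 abi_ulong words, and guest fd 35 lives
 * in word i = 1, bit j = 3 (since 35 = 1 * 32 + 3).  Walking the bits
 * explicitly keeps us independent of the host's fd_set layout and of
 * any guest/host endianness difference, which __get_user()/__put_user()
 * handle per word.
 */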
929 
930 #if defined(__alpha__)
931 #define HOST_HZ 1024
932 #else
933 #define HOST_HZ 100
934 #endif
935 
936 static inline abi_long host_to_target_clock_t(long ticks)
937 {
938 #if HOST_HZ == TARGET_HZ
939     return ticks;
940 #else
941     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
942 #endif
943 }
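
/*
 * Example of the scaling above (the TARGET_HZ value is illustrative):
 * with HOST_HZ = 100 and a target defining TARGET_HZ = 250, a host
 * value of 150 ticks (1.5 seconds) becomes 150 * 250 / 100 = 375
 * target ticks, which is still 1.5 seconds from the guest's point of
 * view.
 */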
944 
945 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
946                                              const struct rusage *rusage)
947 {
948     struct target_rusage *target_rusage;
949 
950     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
951         return -TARGET_EFAULT;
952     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
953     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
954     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
955     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
956     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
957     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
958     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
959     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
960     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
961     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
962     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
963     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
964     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
965     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
966     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
967     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
968     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
969     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
970     unlock_user_struct(target_rusage, target_addr, 1);
971 
972     return 0;
973 }
974 
975 #ifdef TARGET_NR_setrlimit
976 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
977 {
978     abi_ulong target_rlim_swap;
979     rlim_t result;
980 
981     target_rlim_swap = tswapal(target_rlim);
982     if (target_rlim_swap == TARGET_RLIM_INFINITY)
983         return RLIM_INFINITY;
984 
985     result = target_rlim_swap;
986     if (target_rlim_swap != (rlim_t)result)
987         return RLIM_INFINITY;
988 
989     return result;
990 }
991 #endif
992 
993 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
994 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
995 {
996     abi_ulong target_rlim_swap;
997     abi_ulong result;
998 
999     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1000         target_rlim_swap = TARGET_RLIM_INFINITY;
1001     else
1002         target_rlim_swap = rlim;
1003     result = tswapal(target_rlim_swap);
1004 
1005     return result;
1006 }
1007 #endif
1008 
1009 static inline int target_to_host_resource(int code)
1010 {
1011     switch (code) {
1012     case TARGET_RLIMIT_AS:
1013         return RLIMIT_AS;
1014     case TARGET_RLIMIT_CORE:
1015         return RLIMIT_CORE;
1016     case TARGET_RLIMIT_CPU:
1017         return RLIMIT_CPU;
1018     case TARGET_RLIMIT_DATA:
1019         return RLIMIT_DATA;
1020     case TARGET_RLIMIT_FSIZE:
1021         return RLIMIT_FSIZE;
1022     case TARGET_RLIMIT_LOCKS:
1023         return RLIMIT_LOCKS;
1024     case TARGET_RLIMIT_MEMLOCK:
1025         return RLIMIT_MEMLOCK;
1026     case TARGET_RLIMIT_MSGQUEUE:
1027         return RLIMIT_MSGQUEUE;
1028     case TARGET_RLIMIT_NICE:
1029         return RLIMIT_NICE;
1030     case TARGET_RLIMIT_NOFILE:
1031         return RLIMIT_NOFILE;
1032     case TARGET_RLIMIT_NPROC:
1033         return RLIMIT_NPROC;
1034     case TARGET_RLIMIT_RSS:
1035         return RLIMIT_RSS;
1036     case TARGET_RLIMIT_RTPRIO:
1037         return RLIMIT_RTPRIO;
1038     case TARGET_RLIMIT_SIGPENDING:
1039         return RLIMIT_SIGPENDING;
1040     case TARGET_RLIMIT_STACK:
1041         return RLIMIT_STACK;
1042     default:
1043         return code;
1044     }
1045 }
1046 
1047 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1048                                               abi_ulong target_tv_addr)
1049 {
1050     struct target_timeval *target_tv;
1051 
1052     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1053         return -TARGET_EFAULT;
1054     }
1055 
1056     __get_user(tv->tv_sec, &target_tv->tv_sec);
1057     __get_user(tv->tv_usec, &target_tv->tv_usec);
1058 
1059     unlock_user_struct(target_tv, target_tv_addr, 0);
1060 
1061     return 0;
1062 }
1063 
1064 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1065                                             const struct timeval *tv)
1066 {
1067     struct target_timeval *target_tv;
1068 
1069     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1070         return -TARGET_EFAULT;
1071     }
1072 
1073     __put_user(tv->tv_sec, &target_tv->tv_sec);
1074     __put_user(tv->tv_usec, &target_tv->tv_usec);
1075 
1076     unlock_user_struct(target_tv, target_tv_addr, 1);
1077 
1078     return 0;
1079 }
1080 
1081 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1082 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1083                                                 abi_ulong target_tv_addr)
1084 {
1085     struct target__kernel_sock_timeval *target_tv;
1086 
1087     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1088         return -TARGET_EFAULT;
1089     }
1090 
1091     __get_user(tv->tv_sec, &target_tv->tv_sec);
1092     __get_user(tv->tv_usec, &target_tv->tv_usec);
1093 
1094     unlock_user_struct(target_tv, target_tv_addr, 0);
1095 
1096     return 0;
1097 }
1098 #endif
1099 
1100 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1101                                               const struct timeval *tv)
1102 {
1103     struct target__kernel_sock_timeval *target_tv;
1104 
1105     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1106         return -TARGET_EFAULT;
1107     }
1108 
1109     __put_user(tv->tv_sec, &target_tv->tv_sec);
1110     __put_user(tv->tv_usec, &target_tv->tv_usec);
1111 
1112     unlock_user_struct(target_tv, target_tv_addr, 1);
1113 
1114     return 0;
1115 }
1116 
1117 #if defined(TARGET_NR_futex) || \
1118     defined(TARGET_NR_rt_sigtimedwait) || \
1119     defined(TARGET_NR_pselect6) || \
1120     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1121     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1122     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1123     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1124     defined(TARGET_NR_timer_settime) || \
1125     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1126 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1127                                                abi_ulong target_addr)
1128 {
1129     struct target_timespec *target_ts;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1135     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1136     unlock_user_struct(target_ts, target_addr, 0);
1137     return 0;
1138 }
1139 #endif
1140 
1141 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1142     defined(TARGET_NR_timer_settime64) || \
1143     defined(TARGET_NR_mq_timedsend_time64) || \
1144     defined(TARGET_NR_mq_timedreceive_time64) || \
1145     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1146     defined(TARGET_NR_clock_nanosleep_time64) || \
1147     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1148     defined(TARGET_NR_utimensat) || \
1149     defined(TARGET_NR_utimensat_time64) || \
1150     defined(TARGET_NR_semtimedop_time64) || \
1151     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1152 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1153                                                  abi_ulong target_addr)
1154 {
1155     struct target__kernel_timespec *target_ts;
1156 
1157     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1158         return -TARGET_EFAULT;
1159     }
1160     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1161     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1162     /* in 32bit mode, this drops the padding */
1163     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1164     unlock_user_struct(target_ts, target_addr, 0);
1165     return 0;
1166 }
1167 #endif
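
/*
 * Note on the cast above: target__kernel_timespec keeps tv_nsec in a
 * 64-bit slot even for 32-bit guests, where the upper half is only
 * padding.  Casting through abi_long first truncates the value to the
 * guest's 32-bit long (dropping the padding half) and then sign-extends
 * it back to the host's long; for 64-bit guests abi_long is already 64
 * bits wide and the cast is a no-op.
 */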
1168 
1169 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1170                                                struct timespec *host_ts)
1171 {
1172     struct target_timespec *target_ts;
1173 
1174     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1175         return -TARGET_EFAULT;
1176     }
1177     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1178     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1179     unlock_user_struct(target_ts, target_addr, 1);
1180     return 0;
1181 }
1182 
1183 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1184                                                  struct timespec *host_ts)
1185 {
1186     struct target__kernel_timespec *target_ts;
1187 
1188     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1189         return -TARGET_EFAULT;
1190     }
1191     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1192     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1193     unlock_user_struct(target_ts, target_addr, 1);
1194     return 0;
1195 }
1196 
1197 #if defined(TARGET_NR_gettimeofday)
1198 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1199                                              struct timezone *tz)
1200 {
1201     struct target_timezone *target_tz;
1202 
1203     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1204         return -TARGET_EFAULT;
1205     }
1206 
1207     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1208     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1209 
1210     unlock_user_struct(target_tz, target_tz_addr, 1);
1211 
1212     return 0;
1213 }
1214 #endif
1215 
1216 #if defined(TARGET_NR_settimeofday)
1217 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1218                                                abi_ulong target_tz_addr)
1219 {
1220     struct target_timezone *target_tz;
1221 
1222     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225 
1226     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1227     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1228 
1229     unlock_user_struct(target_tz, target_tz_addr, 0);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1236 #include <mqueue.h>
1237 
1238 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1239                                               abi_ulong target_mq_attr_addr)
1240 {
1241     struct target_mq_attr *target_mq_attr;
1242 
1243     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1244                           target_mq_attr_addr, 1))
1245         return -TARGET_EFAULT;
1246 
1247     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1248     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1249     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1250     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1251 
1252     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1253 
1254     return 0;
1255 }
1256 
1257 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1258                                             const struct mq_attr *attr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1263                           target_mq_attr_addr, 0))
1264         return -TARGET_EFAULT;
1265 
1266     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1272 
1273     return 0;
1274 }
1275 #endif
1276 
1277 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1278 /* do_select() must return target values and target errnos. */
1279 static abi_long do_select(int n,
1280                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1281                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1282 {
1283     fd_set rfds, wfds, efds;
1284     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1285     struct timeval tv;
1286     struct timespec ts, *ts_ptr;
1287     abi_long ret;
1288 
1289     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1298     if (ret) {
1299         return ret;
1300     }
1301 
1302     if (target_tv_addr) {
1303         if (copy_from_user_timeval(&tv, target_tv_addr))
1304             return -TARGET_EFAULT;
1305         ts.tv_sec = tv.tv_sec;
1306         ts.tv_nsec = tv.tv_usec * 1000;
1307         ts_ptr = &ts;
1308     } else {
1309         ts_ptr = NULL;
1310     }
1311 
1312     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1313                                   ts_ptr, NULL));
1314 
1315     if (!is_error(ret)) {
1316         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1317             return -TARGET_EFAULT;
1318         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1319             return -TARGET_EFAULT;
1320         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1321             return -TARGET_EFAULT;
1322 
1323         if (target_tv_addr) {
1324             tv.tv_sec = ts.tv_sec;
1325             tv.tv_usec = ts.tv_nsec / 1000;
1326             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1327                 return -TARGET_EFAULT;
1328             }
1329         }
1330     }
1331 
1332     return ret;
1333 }
1334 
1335 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1336 static abi_long do_old_select(abi_ulong arg1)
1337 {
1338     struct target_sel_arg_struct *sel;
1339     abi_ulong inp, outp, exp, tvp;
1340     long nsel;
1341 
1342     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1343         return -TARGET_EFAULT;
1344     }
1345 
1346     nsel = tswapal(sel->n);
1347     inp = tswapal(sel->inp);
1348     outp = tswapal(sel->outp);
1349     exp = tswapal(sel->exp);
1350     tvp = tswapal(sel->tvp);
1351 
1352     unlock_user_struct(sel, arg1, 0);
1353 
1354     return do_select(nsel, inp, outp, exp, tvp);
1355 }
1356 #endif
1357 #endif
1358 
1359 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1360 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1361                             abi_long arg4, abi_long arg5, abi_long arg6,
1362                             bool time64)
1363 {
1364     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1365     fd_set rfds, wfds, efds;
1366     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1367     struct timespec ts, *ts_ptr;
1368     abi_long ret;
1369 
1370     /*
1371      * The 6th arg is actually two args smashed together,
1372      * so we cannot use the C library.
1373      */
1374     sigset_t set;
1375     struct {
1376         sigset_t *set;
1377         size_t size;
1378     } sig, *sig_ptr;
1379 
1380     abi_ulong arg_sigset, arg_sigsize, *arg7;
1381     target_sigset_t *target_sigset;
1382 
1383     n = arg1;
1384     rfd_addr = arg2;
1385     wfd_addr = arg3;
1386     efd_addr = arg4;
1387     ts_addr = arg5;
1388 
1389     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1398     if (ret) {
1399         return ret;
1400     }
1401 
1402     /*
1403      * This takes a timespec, and not a timeval, so we cannot
1404      * use the do_select() helper ...
1405      */
1406     if (ts_addr) {
1407         if (time64) {
1408             if (target_to_host_timespec64(&ts, ts_addr)) {
1409                 return -TARGET_EFAULT;
1410             }
1411         } else {
1412             if (target_to_host_timespec(&ts, ts_addr)) {
1413                 return -TARGET_EFAULT;
1414             }
1415         }
1416         ts_ptr = &ts;
1417     } else {
1418         ts_ptr = NULL;
1419     }
1420 
1421     /* Extract the two packed args for the sigset */
1422     if (arg6) {
1423         sig_ptr = &sig;
1424         sig.size = SIGSET_T_SIZE;
1425 
1426         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1427         if (!arg7) {
1428             return -TARGET_EFAULT;
1429         }
1430         arg_sigset = tswapal(arg7[0]);
1431         arg_sigsize = tswapal(arg7[1]);
1432         unlock_user(arg7, arg6, 0);
1433 
1434         if (arg_sigset) {
1435             sig.set = &set;
1436             if (arg_sigsize != sizeof(*target_sigset)) {
1437                 /* Like the kernel, we enforce correct size sigsets */
1438                 return -TARGET_EINVAL;
1439             }
1440             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1441                                       sizeof(*target_sigset), 1);
1442             if (!target_sigset) {
1443                 return -TARGET_EFAULT;
1444             }
1445             target_to_host_sigset(&set, target_sigset);
1446             unlock_user(target_sigset, arg_sigset, 0);
1447         } else {
1448             sig.set = NULL;
1449         }
1450     } else {
1451         sig_ptr = NULL;
1452     }
1453 
1454     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1455                                   ts_ptr, sig_ptr));
1456 
1457     if (!is_error(ret)) {
1458         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1465             return -TARGET_EFAULT;
1466         }
1467         if (time64) {
1468             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1469                 return -TARGET_EFAULT;
1470             }
1471         } else {
1472             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1473                 return -TARGET_EFAULT;
1474             }
1475         }
1476     }
1477     return ret;
1478 }
1479 #endif
1480 
1481 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1482     defined(TARGET_NR_ppoll_time64)
1483 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1484                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1485 {
1486     struct target_pollfd *target_pfd;
1487     unsigned int nfds = arg2;
1488     struct pollfd *pfd;
1489     unsigned int i;
1490     abi_long ret;
1491 
1492     pfd = NULL;
1493     target_pfd = NULL;
1494     if (nfds) {
1495         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1496             return -TARGET_EINVAL;
1497         }
1498         target_pfd = lock_user(VERIFY_WRITE, arg1,
1499                                sizeof(struct target_pollfd) * nfds, 1);
1500         if (!target_pfd) {
1501             return -TARGET_EFAULT;
1502         }
1503 
1504         pfd = alloca(sizeof(struct pollfd) * nfds);
1505         for (i = 0; i < nfds; i++) {
1506             pfd[i].fd = tswap32(target_pfd[i].fd);
1507             pfd[i].events = tswap16(target_pfd[i].events);
1508         }
1509     }
1510     if (ppoll) {
1511         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1512         target_sigset_t *target_set;
1513         sigset_t _set, *set = &_set;
1514 
1515         if (arg3) {
1516             if (time64) {
1517                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1518                     unlock_user(target_pfd, arg1, 0);
1519                     return -TARGET_EFAULT;
1520                 }
1521             } else {
1522                 if (target_to_host_timespec(timeout_ts, arg3)) {
1523                     unlock_user(target_pfd, arg1, 0);
1524                     return -TARGET_EFAULT;
1525                 }
1526             }
1527         } else {
1528             timeout_ts = NULL;
1529         }
1530 
1531         if (arg4) {
1532             if (arg5 != sizeof(target_sigset_t)) {
1533                 unlock_user(target_pfd, arg1, 0);
1534                 return -TARGET_EINVAL;
1535             }
1536 
1537             target_set = lock_user(VERIFY_READ, arg4,
1538                                    sizeof(target_sigset_t), 1);
1539             if (!target_set) {
1540                 unlock_user(target_pfd, arg1, 0);
1541                 return -TARGET_EFAULT;
1542             }
1543             target_to_host_sigset(set, target_set);
1544         } else {
1545             set = NULL;
1546         }
1547 
1548         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1549                                    set, SIGSET_T_SIZE));
1550 
1551         if (!is_error(ret) && arg3) {
1552             if (time64) {
1553                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             } else {
1557                 if (host_to_target_timespec(arg3, timeout_ts)) {
1558                     return -TARGET_EFAULT;
1559                 }
1560             }
1561         }
1562         if (arg4) {
1563             unlock_user(target_set, arg4, 0);
1564         }
1565     } else {
1566         struct timespec ts, *pts;
1567 
1568         if (arg3 >= 0) {
1569             /* Convert milliseconds to seconds and nanoseconds. */
1570             ts.tv_sec = arg3 / 1000;
1571             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1572             pts = &ts;
1573         } else {
1574             /* A negative poll() timeout means "infinite". */
1575             pts = NULL;
1576         }
1577         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1578     }
1579 
1580     if (!is_error(ret)) {
1581         for (i = 0; i < nfds; i++) {
1582             target_pfd[i].revents = tswap16(pfd[i].revents);
1583         }
1584     }
1585     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1586     return ret;
1587 }
1588 #endif
1589 
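/*
 * do_pipe2() only succeeds when the host provides pipe2() (CONFIG_PIPE2);
 * otherwise the flags variant is reported as -ENOSYS.  do_pipe() handles
 * the targets whose original pipe syscall returns the second descriptor
 * in a register (Alpha, MIPS, SH4, SPARC) instead of storing both
 * descriptors through the user pointer.
 */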
1590 static abi_long do_pipe2(int host_pipe[], int flags)
1591 {
1592 #ifdef CONFIG_PIPE2
1593     return pipe2(host_pipe, flags);
1594 #else
1595     return -ENOSYS;
1596 #endif
1597 }
1598 
1599 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1600                         int flags, int is_pipe2)
1601 {
1602     int host_pipe[2];
1603     abi_long ret;
1604     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1605 
1606     if (is_error(ret))
1607         return get_errno(ret);
1608 
1609     /* Several targets have special calling conventions for the original
1610        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1611     if (!is_pipe2) {
1612 #if defined(TARGET_ALPHA)
1613         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_MIPS)
1616         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SH4)
1619         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #elif defined(TARGET_SPARC)
1622         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1623         return host_pipe[0];
1624 #endif
1625     }
1626 
1627     if (put_user_s32(host_pipe[0], pipedes)
1628         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1629         return -TARGET_EFAULT;
1630     return get_errno(ret);
1631 }
1632 
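/*
 * Convert a guest struct ip_mreq/ip_mreqn as used with IP_ADD_MEMBERSHIP
 * and friends.  The multicast and interface addresses are already in
 * network byte order and are copied unchanged; only imr_ifindex needs
 * swapping, and only when the caller passed a full ip_mreqn.
 */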
1633 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1634                                               abi_ulong target_addr,
1635                                               socklen_t len)
1636 {
1637     struct target_ip_mreqn *target_smreqn;
1638 
1639     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1640     if (!target_smreqn)
1641         return -TARGET_EFAULT;
1642     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1643     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1644     if (len == sizeof(struct target_ip_mreqn))
1645         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1646     unlock_user(target_smreqn, target_addr, 0);
1647 
1648     return 0;
1649 }
1650 
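/*
 * Copy a guest sockaddr into host form.  A per-fd translator (if one was
 * registered when the socket was created) takes precedence; otherwise the
 * address family is byte-swapped, AF_UNIX paths get the missing NUL
 * terminator described in the comment below, and the AF_NETLINK and
 * AF_PACKET specific fields are converted individually.
 */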
1651 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1652                                                abi_ulong target_addr,
1653                                                socklen_t len)
1654 {
1655     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1656     sa_family_t sa_family;
1657     struct target_sockaddr *target_saddr;
1658 
1659     if (fd_trans_target_to_host_addr(fd)) {
1660         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1661     }
1662 
1663     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1664     if (!target_saddr)
1665         return -TARGET_EFAULT;
1666 
1667     sa_family = tswap16(target_saddr->sa_family);
1668 
1669     /* The caller might send an incomplete sun_path; sun_path
1670      * must be terminated by \0 (see the manual page), but
1671      * unfortunately it is quite common to specify the sockaddr_un
1672      * length as "strlen(x->sun_path)" when it should be
1673      * "strlen(...) + 1". We fix that here if needed.
1674      * The Linux kernel has a similar feature.
1675      */
1676 
1677     if (sa_family == AF_UNIX) {
1678         if (len < unix_maxlen && len > 0) {
1679             char *cp = (char *)target_saddr;
1680 
1681             if (cp[len - 1] && !cp[len])
1682                 len++;
1683         }
1684         if (len > unix_maxlen)
1685             len = unix_maxlen;
1686     }
1687 
1688     memcpy(addr, target_saddr, len);
1689     addr->sa_family = sa_family;
1690     if (sa_family == AF_NETLINK) {
1691         struct sockaddr_nl *nladdr;
1692 
1693         nladdr = (struct sockaddr_nl *)addr;
1694         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1695         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1696     } else if (sa_family == AF_PACKET) {
1697         struct target_sockaddr_ll *lladdr;
1698 
1699         lladdr = (struct target_sockaddr_ll *)addr;
1700         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1701         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1702     }
1703     unlock_user(target_saddr, target_addr, 0);
1704 
1705     return 0;
1706 }
1707 
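/*
 * The reverse direction: copy a host sockaddr back to the guest, swapping
 * sa_family and the AF_NETLINK, AF_PACKET and AF_INET6 fields that differ
 * in byte order.  A zero length is accepted and simply ignored.
 */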
1708 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1709                                                struct sockaddr *addr,
1710                                                socklen_t len)
1711 {
1712     struct target_sockaddr *target_saddr;
1713 
1714     if (len == 0) {
1715         return 0;
1716     }
1717     assert(addr);
1718 
1719     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1720     if (!target_saddr)
1721         return -TARGET_EFAULT;
1722     memcpy(target_saddr, addr, len);
1723     if (len >= offsetof(struct target_sockaddr, sa_family) +
1724         sizeof(target_saddr->sa_family)) {
1725         target_saddr->sa_family = tswap16(addr->sa_family);
1726     }
1727     if (addr->sa_family == AF_NETLINK &&
1728         len >= sizeof(struct target_sockaddr_nl)) {
1729         struct target_sockaddr_nl *target_nl =
1730                (struct target_sockaddr_nl *)target_saddr;
1731         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1732         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1733     } else if (addr->sa_family == AF_PACKET) {
1734         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1735         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1736         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1737     } else if (addr->sa_family == AF_INET6 &&
1738                len >= sizeof(struct target_sockaddr_in6)) {
1739         struct target_sockaddr_in6 *target_in6 =
1740                (struct target_sockaddr_in6 *)target_saddr;
1741         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1742     }
1743     unlock_user(target_saddr, target_addr, len);
1744 
1745     return 0;
1746 }
1747 
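/*
 * Convert the ancillary data (control messages) of a guest sendmsg() into
 * host form.  SCM_RIGHTS file descriptor arrays and SCM_CREDENTIALS are
 * converted element by element; unknown payload types are copied verbatim
 * with a LOG_UNIMP warning.  Overflowing the host control buffer here
 * would be a QEMU bug, since QEMU sized that buffer itself.
 */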
1748 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1749                                            struct target_msghdr *target_msgh)
1750 {
1751     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1752     abi_long msg_controllen;
1753     abi_ulong target_cmsg_addr;
1754     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1755     socklen_t space = 0;
1756 
1757     msg_controllen = tswapal(target_msgh->msg_controllen);
1758     if (msg_controllen < sizeof (struct target_cmsghdr))
1759         goto the_end;
1760     target_cmsg_addr = tswapal(target_msgh->msg_control);
1761     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1762     target_cmsg_start = target_cmsg;
1763     if (!target_cmsg)
1764         return -TARGET_EFAULT;
1765 
1766     while (cmsg && target_cmsg) {
1767         void *data = CMSG_DATA(cmsg);
1768         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1769 
1770         int len = tswapal(target_cmsg->cmsg_len)
1771             - sizeof(struct target_cmsghdr);
1772 
1773         space += CMSG_SPACE(len);
1774         if (space > msgh->msg_controllen) {
1775             space -= CMSG_SPACE(len);
1776             /* This is a QEMU bug, since we allocated the payload
1777              * area ourselves (unlike overflow in host-to-target
1778              * conversion, which is just the guest giving us a buffer
1779              * that's too small). It can't happen for the payload types
1780              * we currently support; if it becomes an issue in future
1781              * we would need to improve our allocation strategy to
1782              * something more intelligent than "twice the size of the
1783              * target buffer we're reading from".
1784              */
1785             qemu_log_mask(LOG_UNIMP,
1786                           ("Unsupported ancillary data %d/%d: "
1787                            "unhandled msg size\n"),
1788                           tswap32(target_cmsg->cmsg_level),
1789                           tswap32(target_cmsg->cmsg_type));
1790             break;
1791         }
1792 
1793         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1794             cmsg->cmsg_level = SOL_SOCKET;
1795         } else {
1796             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1797         }
1798         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1799         cmsg->cmsg_len = CMSG_LEN(len);
1800 
1801         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1802             int *fd = (int *)data;
1803             int *target_fd = (int *)target_data;
1804             int i, numfds = len / sizeof(int);
1805 
1806             for (i = 0; i < numfds; i++) {
1807                 __get_user(fd[i], target_fd + i);
1808             }
1809         } else if (cmsg->cmsg_level == SOL_SOCKET
1810                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1811             struct ucred *cred = (struct ucred *)data;
1812             struct target_ucred *target_cred =
1813                 (struct target_ucred *)target_data;
1814 
1815             __get_user(cred->pid, &target_cred->pid);
1816             __get_user(cred->uid, &target_cred->uid);
1817             __get_user(cred->gid, &target_cred->gid);
1818         } else {
1819             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1820                           cmsg->cmsg_level, cmsg->cmsg_type);
1821             memcpy(data, target_data, len);
1822         }
1823 
1824         cmsg = CMSG_NXTHDR(msgh, cmsg);
1825         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1826                                          target_cmsg_start);
1827     }
1828     unlock_user(target_cmsg, target_cmsg_addr, 0);
1829  the_end:
1830     msgh->msg_controllen = space;
1831     return 0;
1832 }
1833 
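/*
 * Convert ancillary data received by the host back into the guest's
 * recvmsg() control buffer.  Payload types whose target layout differs
 * (e.g. SO_TIMESTAMP's struct timeval) adjust tgt_len; if the guest
 * buffer is too small the data is truncated and MSG_CTRUNC is reported,
 * mirroring the kernel's put_cmsg() behaviour.
 */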
1834 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1835                                            struct msghdr *msgh)
1836 {
1837     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1838     abi_long msg_controllen;
1839     abi_ulong target_cmsg_addr;
1840     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1841     socklen_t space = 0;
1842 
1843     msg_controllen = tswapal(target_msgh->msg_controllen);
1844     if (msg_controllen < sizeof (struct target_cmsghdr))
1845         goto the_end;
1846     target_cmsg_addr = tswapal(target_msgh->msg_control);
1847     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1848     target_cmsg_start = target_cmsg;
1849     if (!target_cmsg)
1850         return -TARGET_EFAULT;
1851 
1852     while (cmsg && target_cmsg) {
1853         void *data = CMSG_DATA(cmsg);
1854         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1855 
1856         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1857         int tgt_len, tgt_space;
1858 
1859         /* We never copy a half-header but may copy half-data;
1860          * this is Linux's behaviour in put_cmsg(). Note that
1861          * truncation here is a guest problem (which we report
1862          * to the guest via the CTRUNC bit), unlike truncation
1863          * in target_to_host_cmsg, which is a QEMU bug.
1864          */
1865         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1866             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1867             break;
1868         }
1869 
1870         if (cmsg->cmsg_level == SOL_SOCKET) {
1871             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1872         } else {
1873             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1874         }
1875         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1876 
1877         /* Payload types which need a different size of payload on
1878          * the target must adjust tgt_len here.
1879          */
1880         tgt_len = len;
1881         switch (cmsg->cmsg_level) {
1882         case SOL_SOCKET:
1883             switch (cmsg->cmsg_type) {
1884             case SO_TIMESTAMP:
1885                 tgt_len = sizeof(struct target_timeval);
1886                 break;
1887             default:
1888                 break;
1889             }
1890             break;
1891         default:
1892             break;
1893         }
1894 
1895         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1896             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1897             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1898         }
1899 
1900         /* We must now copy-and-convert len bytes of payload
1901          * into tgt_len bytes of destination space. Bear in mind
1902          * that in both source and destination we may be dealing
1903          * with a truncated value!
1904          */
1905         switch (cmsg->cmsg_level) {
1906         case SOL_SOCKET:
1907             switch (cmsg->cmsg_type) {
1908             case SCM_RIGHTS:
1909             {
1910                 int *fd = (int *)data;
1911                 int *target_fd = (int *)target_data;
1912                 int i, numfds = tgt_len / sizeof(int);
1913 
1914                 for (i = 0; i < numfds; i++) {
1915                     __put_user(fd[i], target_fd + i);
1916                 }
1917                 break;
1918             }
1919             case SO_TIMESTAMP:
1920             {
1921                 struct timeval *tv = (struct timeval *)data;
1922                 struct target_timeval *target_tv =
1923                     (struct target_timeval *)target_data;
1924 
1925                 if (len != sizeof(struct timeval) ||
1926                     tgt_len != sizeof(struct target_timeval)) {
1927                     goto unimplemented;
1928                 }
1929 
1930                 /* copy struct timeval to target */
1931                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1932                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1933                 break;
1934             }
1935             case SCM_CREDENTIALS:
1936             {
1937                 struct ucred *cred = (struct ucred *)data;
1938                 struct target_ucred *target_cred =
1939                     (struct target_ucred *)target_data;
1940 
1941                 __put_user(cred->pid, &target_cred->pid);
1942                 __put_user(cred->uid, &target_cred->uid);
1943                 __put_user(cred->gid, &target_cred->gid);
1944                 break;
1945             }
1946             default:
1947                 goto unimplemented;
1948             }
1949             break;
1950 
1951         case SOL_IP:
1952             switch (cmsg->cmsg_type) {
1953             case IP_TTL:
1954             {
1955                 uint32_t *v = (uint32_t *)data;
1956                 uint32_t *t_int = (uint32_t *)target_data;
1957 
1958                 if (len != sizeof(uint32_t) ||
1959                     tgt_len != sizeof(uint32_t)) {
1960                     goto unimplemented;
1961                 }
1962                 __put_user(*v, t_int);
1963                 break;
1964             }
1965             case IP_RECVERR:
1966             {
1967                 struct errhdr_t {
1968                    struct sock_extended_err ee;
1969                    struct sockaddr_in offender;
1970                 };
1971                 struct errhdr_t *errh = (struct errhdr_t *)data;
1972                 struct errhdr_t *target_errh =
1973                     (struct errhdr_t *)target_data;
1974 
1975                 if (len != sizeof(struct errhdr_t) ||
1976                     tgt_len != sizeof(struct errhdr_t)) {
1977                     goto unimplemented;
1978                 }
1979                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1980                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1981                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1982                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1983                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1984                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1985                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1986                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1987                     (void *) &errh->offender, sizeof(errh->offender));
1988                 break;
1989             }
1990             default:
1991                 goto unimplemented;
1992             }
1993             break;
1994 
1995         case SOL_IPV6:
1996             switch (cmsg->cmsg_type) {
1997             case IPV6_HOPLIMIT:
1998             {
1999                 uint32_t *v = (uint32_t *)data;
2000                 uint32_t *t_int = (uint32_t *)target_data;
2001 
2002                 if (len != sizeof(uint32_t) ||
2003                     tgt_len != sizeof(uint32_t)) {
2004                     goto unimplemented;
2005                 }
2006                 __put_user(*v, t_int);
2007                 break;
2008             }
2009             case IPV6_RECVERR:
2010             {
2011                 struct errhdr6_t {
2012                    struct sock_extended_err ee;
2013                    struct sockaddr_in6 offender;
2014                 };
2015                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2016                 struct errhdr6_t *target_errh =
2017                     (struct errhdr6_t *)target_data;
2018 
2019                 if (len != sizeof(struct errhdr6_t) ||
2020                     tgt_len != sizeof(struct errhdr6_t)) {
2021                     goto unimplemented;
2022                 }
2023                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2024                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2025                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2026                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2027                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2028                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2029                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2030                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2031                     (void *) &errh->offender, sizeof(errh->offender));
2032                 break;
2033             }
2034             default:
2035                 goto unimplemented;
2036             }
2037             break;
2038 
2039         default:
2040         unimplemented:
2041             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2042                           cmsg->cmsg_level, cmsg->cmsg_type);
2043             memcpy(target_data, data, MIN(len, tgt_len));
2044             if (tgt_len > len) {
2045                 memset(target_data + len, 0, tgt_len - len);
2046             }
2047         }
2048 
2049         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2050         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2051         if (msg_controllen < tgt_space) {
2052             tgt_space = msg_controllen;
2053         }
2054         msg_controllen -= tgt_space;
2055         space += tgt_space;
2056         cmsg = CMSG_NXTHDR(msgh, cmsg);
2057         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2058                                          target_cmsg_start);
2059     }
2060     unlock_user(target_cmsg, target_cmsg_addr, space);
2061  the_end:
2062     target_msgh->msg_controllen = tswapal(space);
2063     return 0;
2064 }
2065 
2066 /* do_setsockopt() must return target values and target errnos. */
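/*
 * The option value is fetched from guest memory according to the option:
 * plain integer options are read with get_user_u32(), while structured
 * options (ip_mreq, ipv6_mreq, icmp6 filters, BPF filter programs, linger
 * and timeout values) are converted field by field.  For example, a guest
 * setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) arrives here as
 * TARGET_SO_RCVTIMEO with a struct target_timeval, which is converted by
 * copy_from_user_timeval() before the host setsockopt() call.
 */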
2067 static abi_long do_setsockopt(int sockfd, int level, int optname,
2068                               abi_ulong optval_addr, socklen_t optlen)
2069 {
2070     abi_long ret;
2071     int val;
2072     struct ip_mreqn *ip_mreq;
2073     struct ip_mreq_source *ip_mreq_source;
2074 
2075     switch(level) {
2076     case SOL_TCP:
2077     case SOL_UDP:
2078         /* TCP and UDP options all take an 'int' value.  */
2079         if (optlen < sizeof(uint32_t))
2080             return -TARGET_EINVAL;
2081 
2082         if (get_user_u32(val, optval_addr))
2083             return -TARGET_EFAULT;
2084         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2085         break;
2086     case SOL_IP:
2087         switch(optname) {
2088         case IP_TOS:
2089         case IP_TTL:
2090         case IP_HDRINCL:
2091         case IP_ROUTER_ALERT:
2092         case IP_RECVOPTS:
2093         case IP_RETOPTS:
2094         case IP_PKTINFO:
2095         case IP_MTU_DISCOVER:
2096         case IP_RECVERR:
2097         case IP_RECVTTL:
2098         case IP_RECVTOS:
2099 #ifdef IP_FREEBIND
2100         case IP_FREEBIND:
2101 #endif
2102         case IP_MULTICAST_TTL:
2103         case IP_MULTICAST_LOOP:
2104             val = 0;
2105             if (optlen >= sizeof(uint32_t)) {
2106                 if (get_user_u32(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             } else if (optlen >= 1) {
2109                 if (get_user_u8(val, optval_addr))
2110                     return -TARGET_EFAULT;
2111             }
2112             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2113             break;
2114         case IP_ADD_MEMBERSHIP:
2115         case IP_DROP_MEMBERSHIP:
2116             if (optlen < sizeof (struct target_ip_mreq) ||
2117                 optlen > sizeof (struct target_ip_mreqn))
2118                 return -TARGET_EINVAL;
2119 
2120             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2121             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2122             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2123             break;
2124 
2125         case IP_BLOCK_SOURCE:
2126         case IP_UNBLOCK_SOURCE:
2127         case IP_ADD_SOURCE_MEMBERSHIP:
2128         case IP_DROP_SOURCE_MEMBERSHIP:
2129             if (optlen != sizeof (struct target_ip_mreq_source))
2130                 return -TARGET_EINVAL;
2131 
2132             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2133             if (!ip_mreq_source) {
2134                 return -TARGET_EFAULT;
2135             }
2136             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2137             unlock_user (ip_mreq_source, optval_addr, 0);
2138             break;
2139 
2140         default:
2141             goto unimplemented;
2142         }
2143         break;
2144     case SOL_IPV6:
2145         switch (optname) {
2146         case IPV6_MTU_DISCOVER:
2147         case IPV6_MTU:
2148         case IPV6_V6ONLY:
2149         case IPV6_RECVPKTINFO:
2150         case IPV6_UNICAST_HOPS:
2151         case IPV6_MULTICAST_HOPS:
2152         case IPV6_MULTICAST_LOOP:
2153         case IPV6_RECVERR:
2154         case IPV6_RECVHOPLIMIT:
2155         case IPV6_2292HOPLIMIT:
2156         case IPV6_CHECKSUM:
2157         case IPV6_ADDRFORM:
2158         case IPV6_2292PKTINFO:
2159         case IPV6_RECVTCLASS:
2160         case IPV6_RECVRTHDR:
2161         case IPV6_2292RTHDR:
2162         case IPV6_RECVHOPOPTS:
2163         case IPV6_2292HOPOPTS:
2164         case IPV6_RECVDSTOPTS:
2165         case IPV6_2292DSTOPTS:
2166         case IPV6_TCLASS:
2167         case IPV6_ADDR_PREFERENCES:
2168 #ifdef IPV6_RECVPATHMTU
2169         case IPV6_RECVPATHMTU:
2170 #endif
2171 #ifdef IPV6_TRANSPARENT
2172         case IPV6_TRANSPARENT:
2173 #endif
2174 #ifdef IPV6_FREEBIND
2175         case IPV6_FREEBIND:
2176 #endif
2177 #ifdef IPV6_RECVORIGDSTADDR
2178         case IPV6_RECVORIGDSTADDR:
2179 #endif
2180             val = 0;
2181             if (optlen < sizeof(uint32_t)) {
2182                 return -TARGET_EINVAL;
2183             }
2184             if (get_user_u32(val, optval_addr)) {
2185                 return -TARGET_EFAULT;
2186             }
2187             ret = get_errno(setsockopt(sockfd, level, optname,
2188                                        &val, sizeof(val)));
2189             break;
2190         case IPV6_PKTINFO:
2191         {
2192             struct in6_pktinfo pki;
2193 
2194             if (optlen < sizeof(pki)) {
2195                 return -TARGET_EINVAL;
2196             }
2197 
2198             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2199                 return -TARGET_EFAULT;
2200             }
2201 
2202             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2203 
2204             ret = get_errno(setsockopt(sockfd, level, optname,
2205                                        &pki, sizeof(pki)));
2206             break;
2207         }
2208         case IPV6_ADD_MEMBERSHIP:
2209         case IPV6_DROP_MEMBERSHIP:
2210         {
2211             struct ipv6_mreq ipv6mreq;
2212 
2213             if (optlen < sizeof(ipv6mreq)) {
2214                 return -TARGET_EINVAL;
2215             }
2216 
2217             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2218                 return -TARGET_EFAULT;
2219             }
2220 
2221             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2222 
2223             ret = get_errno(setsockopt(sockfd, level, optname,
2224                                        &ipv6mreq, sizeof(ipv6mreq)));
2225             break;
2226         }
2227         default:
2228             goto unimplemented;
2229         }
2230         break;
2231     case SOL_ICMPV6:
2232         switch (optname) {
2233         case ICMPV6_FILTER:
2234         {
2235             struct icmp6_filter icmp6f;
2236 
2237             if (optlen > sizeof(icmp6f)) {
2238                 optlen = sizeof(icmp6f);
2239             }
2240 
2241             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2242                 return -TARGET_EFAULT;
2243             }
2244 
2245             for (val = 0; val < 8; val++) {
2246                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2247             }
2248 
2249             ret = get_errno(setsockopt(sockfd, level, optname,
2250                                        &icmp6f, optlen));
2251             break;
2252         }
2253         default:
2254             goto unimplemented;
2255         }
2256         break;
2257     case SOL_RAW:
2258         switch (optname) {
2259         case ICMP_FILTER:
2260         case IPV6_CHECKSUM:
2261             /* These take a u32 value. */
2262             if (optlen < sizeof(uint32_t)) {
2263                 return -TARGET_EINVAL;
2264             }
2265 
2266             if (get_user_u32(val, optval_addr)) {
2267                 return -TARGET_EFAULT;
2268             }
2269             ret = get_errno(setsockopt(sockfd, level, optname,
2270                                        &val, sizeof(val)));
2271             break;
2272 
2273         default:
2274             goto unimplemented;
2275         }
2276         break;
2277 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2278     case SOL_ALG:
2279         switch (optname) {
2280         case ALG_SET_KEY:
2281         {
2282             char *alg_key = g_malloc(optlen);
2283 
2284             if (!alg_key) {
2285                 return -TARGET_ENOMEM;
2286             }
2287             if (copy_from_user(alg_key, optval_addr, optlen)) {
2288                 g_free(alg_key);
2289                 return -TARGET_EFAULT;
2290             }
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        alg_key, optlen));
2293             g_free(alg_key);
2294             break;
2295         }
2296         case ALG_SET_AEAD_AUTHSIZE:
2297         {
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        NULL, optlen));
2300             break;
2301         }
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #endif
2307     case TARGET_SOL_SOCKET:
2308         switch (optname) {
2309         case TARGET_SO_RCVTIMEO:
2310         {
2311                 struct timeval tv;
2312 
2313                 optname = SO_RCVTIMEO;
2314 
2315 set_timeout:
2316                 if (optlen != sizeof(struct target_timeval)) {
2317                     return -TARGET_EINVAL;
2318                 }
2319 
2320                 if (copy_from_user_timeval(&tv, optval_addr)) {
2321                     return -TARGET_EFAULT;
2322                 }
2323 
2324                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2325                                 &tv, sizeof(tv)));
2326                 return ret;
2327         }
2328         case TARGET_SO_SNDTIMEO:
2329                 optname = SO_SNDTIMEO;
2330                 goto set_timeout;
2331         case TARGET_SO_ATTACH_FILTER:
2332         {
2333                 struct target_sock_fprog *tfprog;
2334                 struct target_sock_filter *tfilter;
2335                 struct sock_fprog fprog;
2336                 struct sock_filter *filter;
2337                 int i;
2338 
2339                 if (optlen != sizeof(*tfprog)) {
2340                     return -TARGET_EINVAL;
2341                 }
2342                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345                 if (!lock_user_struct(VERIFY_READ, tfilter,
2346                                       tswapal(tfprog->filter), 0)) {
2347                     unlock_user_struct(tfprog, optval_addr, 1);
2348                     return -TARGET_EFAULT;
2349                 }
2350 
2351                 fprog.len = tswap16(tfprog->len);
2352                 filter = g_try_new(struct sock_filter, fprog.len);
2353                 if (filter == NULL) {
2354                     unlock_user_struct(tfilter, tfprog->filter, 1);
2355                     unlock_user_struct(tfprog, optval_addr, 1);
2356                     return -TARGET_ENOMEM;
2357                 }
2358                 for (i = 0; i < fprog.len; i++) {
2359                     filter[i].code = tswap16(tfilter[i].code);
2360                     filter[i].jt = tfilter[i].jt;
2361                     filter[i].jf = tfilter[i].jf;
2362                     filter[i].k = tswap32(tfilter[i].k);
2363                 }
2364                 fprog.filter = filter;
2365 
2366                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2367                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2368                 g_free(filter);
2369 
2370                 unlock_user_struct(tfilter, tfprog->filter, 1);
2371                 unlock_user_struct(tfprog, optval_addr, 1);
2372                 return ret;
2373         }
2374         case TARGET_SO_BINDTODEVICE:
2375         {
2376                 char *dev_ifname, *addr_ifname;
2377 
2378                 if (optlen > IFNAMSIZ - 1) {
2379                     optlen = IFNAMSIZ - 1;
2380                 }
2381                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2382                 if (!dev_ifname) {
2383                     return -TARGET_EFAULT;
2384                 }
2385                 optname = SO_BINDTODEVICE;
2386                 addr_ifname = alloca(IFNAMSIZ);
2387                 memcpy(addr_ifname, dev_ifname, optlen);
2388                 addr_ifname[optlen] = 0;
2389                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2390                                            addr_ifname, optlen));
2391                 unlock_user(dev_ifname, optval_addr, 0);
2392                 return ret;
2393         }
2394         case TARGET_SO_LINGER:
2395         {
2396                 struct linger lg;
2397                 struct target_linger *tlg;
2398 
2399                 if (optlen != sizeof(struct target_linger)) {
2400                     return -TARGET_EINVAL;
2401                 }
2402                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2403                     return -TARGET_EFAULT;
2404                 }
2405                 __get_user(lg.l_onoff, &tlg->l_onoff);
2406                 __get_user(lg.l_linger, &tlg->l_linger);
2407                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2408                                 &lg, sizeof(lg)));
2409                 unlock_user_struct(tlg, optval_addr, 0);
2410                 return ret;
2411         }
2412         /* Options with 'int' argument.  */
2413         case TARGET_SO_DEBUG:
2414                 optname = SO_DEBUG;
2415                 break;
2416         case TARGET_SO_REUSEADDR:
2417                 optname = SO_REUSEADDR;
2418                 break;
2419 #ifdef SO_REUSEPORT
2420         case TARGET_SO_REUSEPORT:
2421                 optname = SO_REUSEPORT;
2422                 break;
2423 #endif
2424         case TARGET_SO_TYPE:
2425                 optname = SO_TYPE;
2426                 break;
2427         case TARGET_SO_ERROR:
2428                 optname = SO_ERROR;
2429                 break;
2430         case TARGET_SO_DONTROUTE:
2431                 optname = SO_DONTROUTE;
2432                 break;
2433         case TARGET_SO_BROADCAST:
2434                 optname = SO_BROADCAST;
2435                 break;
2436         case TARGET_SO_SNDBUF:
2437                 optname = SO_SNDBUF;
2438                 break;
2439         case TARGET_SO_SNDBUFFORCE:
2440                 optname = SO_SNDBUFFORCE;
2441                 break;
2442         case TARGET_SO_RCVBUF:
2443                 optname = SO_RCVBUF;
2444                 break;
2445         case TARGET_SO_RCVBUFFORCE:
2446                 optname = SO_RCVBUFFORCE;
2447                 break;
2448         case TARGET_SO_KEEPALIVE:
2449                 optname = SO_KEEPALIVE;
2450                 break;
2451         case TARGET_SO_OOBINLINE:
2452                 optname = SO_OOBINLINE;
2453                 break;
2454         case TARGET_SO_NO_CHECK:
2455                 optname = SO_NO_CHECK;
2456                 break;
2457         case TARGET_SO_PRIORITY:
2458                 optname = SO_PRIORITY;
2459                 break;
2460 #ifdef SO_BSDCOMPAT
2461         case TARGET_SO_BSDCOMPAT:
2462                 optname = SO_BSDCOMPAT;
2463                 break;
2464 #endif
2465         case TARGET_SO_PASSCRED:
2466                 optname = SO_PASSCRED;
2467                 break;
2468         case TARGET_SO_PASSSEC:
2469                 optname = SO_PASSSEC;
2470                 break;
2471         case TARGET_SO_TIMESTAMP:
2472                 optname = SO_TIMESTAMP;
2473                 break;
2474         case TARGET_SO_RCVLOWAT:
2475                 optname = SO_RCVLOWAT;
2476                 break;
2477         default:
2478             goto unimplemented;
2479         }
2480         if (optlen < sizeof(uint32_t))
2481             return -TARGET_EINVAL;
2482 
2483         if (get_user_u32(val, optval_addr))
2484             return -TARGET_EFAULT;
2485         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2486         break;
2487 #ifdef SOL_NETLINK
2488     case SOL_NETLINK:
2489         switch (optname) {
2490         case NETLINK_PKTINFO:
2491         case NETLINK_ADD_MEMBERSHIP:
2492         case NETLINK_DROP_MEMBERSHIP:
2493         case NETLINK_BROADCAST_ERROR:
2494         case NETLINK_NO_ENOBUFS:
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2496         case NETLINK_LISTEN_ALL_NSID:
2497         case NETLINK_CAP_ACK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2499 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2500         case NETLINK_EXT_ACK:
2501 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2502 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2503         case NETLINK_GET_STRICT_CHK:
2504 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2505             break;
2506         default:
2507             goto unimplemented;
2508         }
2509         val = 0;
2510         if (optlen < sizeof(uint32_t)) {
2511             return -TARGET_EINVAL;
2512         }
2513         if (get_user_u32(val, optval_addr)) {
2514             return -TARGET_EFAULT;
2515         }
2516         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2517                                    sizeof(val)));
2518         break;
2519 #endif /* SOL_NETLINK */
2520     default:
2521     unimplemented:
2522         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2523                       level, optname);
2524         ret = -TARGET_ENOPROTOOPT;
2525     }
2526     return ret;
2527 }
2528 
2529 /* do_getsockopt() must return target values and target errnos. */
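/*
 * Mirror image of do_setsockopt(): integer options are fetched into a
 * host int and written back as a u32 or a single byte, depending on the
 * length the guest supplied, while structured options (timeouts, peer
 * credentials, SO_PEERSEC, SO_LINGER, netlink membership lists) are
 * converted explicitly.  The guest's optlen is updated with the length
 * actually stored, as the kernel does.
 */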
2530 static abi_long do_getsockopt(int sockfd, int level, int optname,
2531                               abi_ulong optval_addr, abi_ulong optlen)
2532 {
2533     abi_long ret;
2534     int len, val;
2535     socklen_t lv;
2536 
2537     switch(level) {
2538     case TARGET_SOL_SOCKET:
2539         level = SOL_SOCKET;
2540         switch (optname) {
2541         /* These don't just return a single integer */
2542         case TARGET_SO_PEERNAME:
2543             goto unimplemented;
2544         case TARGET_SO_RCVTIMEO: {
2545             struct timeval tv;
2546             socklen_t tvlen;
2547 
2548             optname = SO_RCVTIMEO;
2549 
2550 get_timeout:
2551             if (get_user_u32(len, optlen)) {
2552                 return -TARGET_EFAULT;
2553             }
2554             if (len < 0) {
2555                 return -TARGET_EINVAL;
2556             }
2557 
2558             tvlen = sizeof(tv);
2559             ret = get_errno(getsockopt(sockfd, level, optname,
2560                                        &tv, &tvlen));
2561             if (ret < 0) {
2562                 return ret;
2563             }
2564             if (len > sizeof(struct target_timeval)) {
2565                 len = sizeof(struct target_timeval);
2566             }
2567             if (copy_to_user_timeval(optval_addr, &tv)) {
2568                 return -TARGET_EFAULT;
2569             }
2570             if (put_user_u32(len, optlen)) {
2571                 return -TARGET_EFAULT;
2572             }
2573             break;
2574         }
2575         case TARGET_SO_SNDTIMEO:
2576             optname = SO_SNDTIMEO;
2577             goto get_timeout;
2578         case TARGET_SO_PEERCRED: {
2579             struct ucred cr;
2580             socklen_t crlen;
2581             struct target_ucred *tcr;
2582 
2583             if (get_user_u32(len, optlen)) {
2584                 return -TARGET_EFAULT;
2585             }
2586             if (len < 0) {
2587                 return -TARGET_EINVAL;
2588             }
2589 
2590             crlen = sizeof(cr);
2591             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2592                                        &cr, &crlen));
2593             if (ret < 0) {
2594                 return ret;
2595             }
2596             if (len > crlen) {
2597                 len = crlen;
2598             }
2599             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             __put_user(cr.pid, &tcr->pid);
2603             __put_user(cr.uid, &tcr->uid);
2604             __put_user(cr.gid, &tcr->gid);
2605             unlock_user_struct(tcr, optval_addr, 1);
2606             if (put_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             break;
2610         }
2611         case TARGET_SO_PEERSEC: {
2612             char *name;
2613 
2614             if (get_user_u32(len, optlen)) {
2615                 return -TARGET_EFAULT;
2616             }
2617             if (len < 0) {
2618                 return -TARGET_EINVAL;
2619             }
2620             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2621             if (!name) {
2622                 return -TARGET_EFAULT;
2623             }
2624             lv = len;
2625             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2626                                        name, &lv));
2627             if (put_user_u32(lv, optlen)) {
2628                 ret = -TARGET_EFAULT;
2629             }
2630             unlock_user(name, optval_addr, lv);
2631             break;
2632         }
2633         case TARGET_SO_LINGER:
2634         {
2635             struct linger lg;
2636             socklen_t lglen;
2637             struct target_linger *tlg;
2638 
2639             if (get_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             if (len < 0) {
2643                 return -TARGET_EINVAL;
2644             }
2645 
2646             lglen = sizeof(lg);
2647             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2648                                        &lg, &lglen));
2649             if (ret < 0) {
2650                 return ret;
2651             }
2652             if (len > lglen) {
2653                 len = lglen;
2654             }
2655             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             __put_user(lg.l_onoff, &tlg->l_onoff);
2659             __put_user(lg.l_linger, &tlg->l_linger);
2660             unlock_user_struct(tlg, optval_addr, 1);
2661             if (put_user_u32(len, optlen)) {
2662                 return -TARGET_EFAULT;
2663             }
2664             break;
2665         }
2666         /* Options with 'int' argument.  */
2667         case TARGET_SO_DEBUG:
2668             optname = SO_DEBUG;
2669             goto int_case;
2670         case TARGET_SO_REUSEADDR:
2671             optname = SO_REUSEADDR;
2672             goto int_case;
2673 #ifdef SO_REUSEPORT
2674         case TARGET_SO_REUSEPORT:
2675             optname = SO_REUSEPORT;
2676             goto int_case;
2677 #endif
2678         case TARGET_SO_TYPE:
2679             optname = SO_TYPE;
2680             goto int_case;
2681         case TARGET_SO_ERROR:
2682             optname = SO_ERROR;
2683             goto int_case;
2684         case TARGET_SO_DONTROUTE:
2685             optname = SO_DONTROUTE;
2686             goto int_case;
2687         case TARGET_SO_BROADCAST:
2688             optname = SO_BROADCAST;
2689             goto int_case;
2690         case TARGET_SO_SNDBUF:
2691             optname = SO_SNDBUF;
2692             goto int_case;
2693         case TARGET_SO_RCVBUF:
2694             optname = SO_RCVBUF;
2695             goto int_case;
2696         case TARGET_SO_KEEPALIVE:
2697             optname = SO_KEEPALIVE;
2698             goto int_case;
2699         case TARGET_SO_OOBINLINE:
2700             optname = SO_OOBINLINE;
2701             goto int_case;
2702         case TARGET_SO_NO_CHECK:
2703             optname = SO_NO_CHECK;
2704             goto int_case;
2705         case TARGET_SO_PRIORITY:
2706             optname = SO_PRIORITY;
2707             goto int_case;
2708 #ifdef SO_BSDCOMPAT
2709         case TARGET_SO_BSDCOMPAT:
2710             optname = SO_BSDCOMPAT;
2711             goto int_case;
2712 #endif
2713         case TARGET_SO_PASSCRED:
2714             optname = SO_PASSCRED;
2715             goto int_case;
2716         case TARGET_SO_TIMESTAMP:
2717             optname = SO_TIMESTAMP;
2718             goto int_case;
2719         case TARGET_SO_RCVLOWAT:
2720             optname = SO_RCVLOWAT;
2721             goto int_case;
2722         case TARGET_SO_ACCEPTCONN:
2723             optname = SO_ACCEPTCONN;
2724             goto int_case;
2725         case TARGET_SO_PROTOCOL:
2726             optname = SO_PROTOCOL;
2727             goto int_case;
2728         case TARGET_SO_DOMAIN:
2729             optname = SO_DOMAIN;
2730             goto int_case;
2731         default:
2732             goto int_case;
2733         }
2734         break;
2735     case SOL_TCP:
2736     case SOL_UDP:
2737         /* TCP and UDP options all take an 'int' value.  */
2738     int_case:
2739         if (get_user_u32(len, optlen))
2740             return -TARGET_EFAULT;
2741         if (len < 0)
2742             return -TARGET_EINVAL;
2743         lv = sizeof(lv);
2744         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2745         if (ret < 0)
2746             return ret;
2747         if (optname == SO_TYPE) {
2748             val = host_to_target_sock_type(val);
2749         }
2750         if (len > lv)
2751             len = lv;
2752         if (len == 4) {
2753             if (put_user_u32(val, optval_addr))
2754                 return -TARGET_EFAULT;
2755         } else {
2756             if (put_user_u8(val, optval_addr))
2757                 return -TARGET_EFAULT;
2758         }
2759         if (put_user_u32(len, optlen))
2760             return -TARGET_EFAULT;
2761         break;
2762     case SOL_IP:
2763         switch(optname) {
2764         case IP_TOS:
2765         case IP_TTL:
2766         case IP_HDRINCL:
2767         case IP_ROUTER_ALERT:
2768         case IP_RECVOPTS:
2769         case IP_RETOPTS:
2770         case IP_PKTINFO:
2771         case IP_MTU_DISCOVER:
2772         case IP_RECVERR:
2773         case IP_RECVTOS:
2774 #ifdef IP_FREEBIND
2775         case IP_FREEBIND:
2776 #endif
2777         case IP_MULTICAST_TTL:
2778         case IP_MULTICAST_LOOP:
2779             if (get_user_u32(len, optlen))
2780                 return -TARGET_EFAULT;
2781             if (len < 0)
2782                 return -TARGET_EINVAL;
2783             lv = sizeof(lv);
2784             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2785             if (ret < 0)
2786                 return ret;
2787             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2788                 len = 1;
2789                 if (put_user_u32(len, optlen)
2790                     || put_user_u8(val, optval_addr))
2791                     return -TARGET_EFAULT;
2792             } else {
2793                 if (len > sizeof(int))
2794                     len = sizeof(int);
2795                 if (put_user_u32(len, optlen)
2796                     || put_user_u32(val, optval_addr))
2797                     return -TARGET_EFAULT;
2798             }
2799             break;
2800         default:
2801             ret = -TARGET_ENOPROTOOPT;
2802             break;
2803         }
2804         break;
2805     case SOL_IPV6:
2806         switch (optname) {
2807         case IPV6_MTU_DISCOVER:
2808         case IPV6_MTU:
2809         case IPV6_V6ONLY:
2810         case IPV6_RECVPKTINFO:
2811         case IPV6_UNICAST_HOPS:
2812         case IPV6_MULTICAST_HOPS:
2813         case IPV6_MULTICAST_LOOP:
2814         case IPV6_RECVERR:
2815         case IPV6_RECVHOPLIMIT:
2816         case IPV6_2292HOPLIMIT:
2817         case IPV6_CHECKSUM:
2818         case IPV6_ADDRFORM:
2819         case IPV6_2292PKTINFO:
2820         case IPV6_RECVTCLASS:
2821         case IPV6_RECVRTHDR:
2822         case IPV6_2292RTHDR:
2823         case IPV6_RECVHOPOPTS:
2824         case IPV6_2292HOPOPTS:
2825         case IPV6_RECVDSTOPTS:
2826         case IPV6_2292DSTOPTS:
2827         case IPV6_TCLASS:
2828         case IPV6_ADDR_PREFERENCES:
2829 #ifdef IPV6_RECVPATHMTU
2830         case IPV6_RECVPATHMTU:
2831 #endif
2832 #ifdef IPV6_TRANSPARENT
2833         case IPV6_TRANSPARENT:
2834 #endif
2835 #ifdef IPV6_FREEBIND
2836         case IPV6_FREEBIND:
2837 #endif
2838 #ifdef IPV6_RECVORIGDSTADDR
2839         case IPV6_RECVORIGDSTADDR:
2840 #endif
2841             if (get_user_u32(len, optlen))
2842                 return -TARGET_EFAULT;
2843             if (len < 0)
2844                 return -TARGET_EINVAL;
2845             lv = sizeof(lv);
2846             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2847             if (ret < 0)
2848                 return ret;
2849             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2850                 len = 1;
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u8(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             } else {
2855                 if (len > sizeof(int))
2856                     len = sizeof(int);
2857                 if (put_user_u32(len, optlen)
2858                     || put_user_u32(val, optval_addr))
2859                     return -TARGET_EFAULT;
2860             }
2861             break;
2862         default:
2863             ret = -TARGET_ENOPROTOOPT;
2864             break;
2865         }
2866         break;
2867 #ifdef SOL_NETLINK
2868     case SOL_NETLINK:
2869         switch (optname) {
2870         case NETLINK_PKTINFO:
2871         case NETLINK_BROADCAST_ERROR:
2872         case NETLINK_NO_ENOBUFS:
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2874         case NETLINK_LISTEN_ALL_NSID:
2875         case NETLINK_CAP_ACK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2878         case NETLINK_EXT_ACK:
2879 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2880 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2881         case NETLINK_GET_STRICT_CHK:
2882 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2883             if (get_user_u32(len, optlen)) {
2884                 return -TARGET_EFAULT;
2885             }
2886             if (len != sizeof(val)) {
2887                 return -TARGET_EINVAL;
2888             }
2889             lv = len;
2890             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2891             if (ret < 0) {
2892                 return ret;
2893             }
2894             if (put_user_u32(lv, optlen)
2895                 || put_user_u32(val, optval_addr)) {
2896                 return -TARGET_EFAULT;
2897             }
2898             break;
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LIST_MEMBERSHIPS:
2901         {
2902             uint32_t *results;
2903             int i;
2904             if (get_user_u32(len, optlen)) {
2905                 return -TARGET_EFAULT;
2906             }
2907             if (len < 0) {
2908                 return -TARGET_EINVAL;
2909             }
2910             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2911             if (!results && len > 0) {
2912                 return -TARGET_EFAULT;
2913             }
2914             lv = len;
2915             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2916             if (ret < 0) {
2917                 unlock_user(results, optval_addr, 0);
2918                 return ret;
2919             }
2920             /* Swap host endianness to target endianness. */
2921             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2922                 results[i] = tswap32(results[i]);
2923             }
2924             if (put_user_u32(lv, optlen)) {
2925                 return -TARGET_EFAULT;
2926             }
2927             unlock_user(results, optval_addr, 0);
2928             break;
2929         }
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931         default:
2932             goto unimplemented;
2933         }
2934         break;
2935 #endif /* SOL_NETLINK */
2936     default:
2937     unimplemented:
2938         qemu_log_mask(LOG_UNIMP,
2939                       "getsockopt level=%d optname=%d not yet supported\n",
2940                       level, optname);
2941         ret = -TARGET_EOPNOTSUPP;
2942         break;
2943     }
2944     return ret;
2945 }
2946 
2947 /* Convert a target low/high pair representing a file offset into the host
2948  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2949  * as the kernel doesn't handle them either.
2950  */
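/* Illustrative example (assuming a 32-bit guest on a 64-bit host):
 * tlow = 0x00000000 and thigh = 0x00000001 combine into off = 0x100000000,
 * so *hlow receives 0x100000000 and *hhigh receives 0.
 */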
2951 static void target_to_host_low_high(abi_ulong tlow,
2952                                     abi_ulong thigh,
2953                                     unsigned long *hlow,
2954                                     unsigned long *hhigh)
2955 {
2956     uint64_t off = tlow |
2957         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2958         TARGET_LONG_BITS / 2;
2959 
2960     *hlow = off;
2961     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2962 }
2963 
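/*
 * Build a host iovec array from a guest one.  count == 0 yields NULL with
 * errno cleared, and count > IOV_MAX fails with EINVAL.  Each guest buffer
 * is locked individually: a bad first buffer is a fault, but later bad
 * buffers become zero-length entries so the syscall performs a partial
 * transfer, and the total length is clamped so it stays representable.
 * Errors are reported through errno, not the return value.
 */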
2964 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2965                                 abi_ulong count, int copy)
2966 {
2967     struct target_iovec *target_vec;
2968     struct iovec *vec;
2969     abi_ulong total_len, max_len;
2970     int i;
2971     int err = 0;
2972     bool bad_address = false;
2973 
2974     if (count == 0) {
2975         errno = 0;
2976         return NULL;
2977     }
2978     if (count > IOV_MAX) {
2979         errno = EINVAL;
2980         return NULL;
2981     }
2982 
2983     vec = g_try_new0(struct iovec, count);
2984     if (vec == NULL) {
2985         errno = ENOMEM;
2986         return NULL;
2987     }
2988 
2989     target_vec = lock_user(VERIFY_READ, target_addr,
2990                            count * sizeof(struct target_iovec), 1);
2991     if (target_vec == NULL) {
2992         err = EFAULT;
2993         goto fail2;
2994     }
2995 
2996     /* ??? If host page size > target page size, this will result in a
2997        value larger than what we can actually support.  */
2998     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2999     total_len = 0;
3000 
3001     for (i = 0; i < count; i++) {
3002         abi_ulong base = tswapal(target_vec[i].iov_base);
3003         abi_long len = tswapal(target_vec[i].iov_len);
3004 
3005         if (len < 0) {
3006             err = EINVAL;
3007             goto fail;
3008         } else if (len == 0) {
3009             /* Zero length pointer is ignored.  */
3010             vec[i].iov_base = 0;
3011         } else {
3012             vec[i].iov_base = lock_user(type, base, len, copy);
3013             /* If the first buffer pointer is bad, this is a fault.  But
3014              * subsequent bad buffers will result in a partial write; this
3015              * is realized by filling the vector with null pointers and
3016              * zero lengths. */
3017             if (!vec[i].iov_base) {
3018                 if (i == 0) {
3019                     err = EFAULT;
3020                     goto fail;
3021                 } else {
3022                     bad_address = true;
3023                 }
3024             }
3025             if (bad_address) {
3026                 len = 0;
3027             }
3028             if (len > max_len - total_len) {
3029                 len = max_len - total_len;
3030             }
3031         }
3032         vec[i].iov_len = len;
3033         total_len += len;
3034     }
3035 
3036     unlock_user(target_vec, target_addr, 0);
3037     return vec;
3038 
3039  fail:
3040     while (--i >= 0) {
3041         if (tswapal(target_vec[i].iov_len) > 0) {
3042             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3043         }
3044     }
3045     unlock_user(target_vec, target_addr, 0);
3046  fail2:
3047     g_free(vec);
3048     errno = err;
3049     return NULL;
3050 }
3051 
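/*
 * Release the buffers locked by lock_iovec(), re-reading the guest iovec
 * so each buffer can be unlocked at its guest address; data is copied
 * back only when 'copy' is set.
 */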
3052 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3053                          abi_ulong count, int copy)
3054 {
3055     struct target_iovec *target_vec;
3056     int i;
3057 
3058     target_vec = lock_user(VERIFY_READ, target_addr,
3059                            count * sizeof(struct target_iovec), 1);
3060     if (target_vec) {
3061         for (i = 0; i < count; i++) {
3062             abi_ulong base = tswapal(target_vec[i].iov_base);
3063             abi_long len = tswapal(target_vec[i].iov_len);
3064             if (len < 0) {
3065                 break;
3066             }
3067             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3068         }
3069         unlock_user(target_vec, target_addr, 0);
3070     }
3071 
3072     g_free(vec);
3073 }
3074 
3075 static inline int target_to_host_sock_type(int *type)
3076 {
3077     int host_type = 0;
3078     int target_type = *type;
3079 
3080     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3081     case TARGET_SOCK_DGRAM:
3082         host_type = SOCK_DGRAM;
3083         break;
3084     case TARGET_SOCK_STREAM:
3085         host_type = SOCK_STREAM;
3086         break;
3087     default:
3088         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3089         break;
3090     }
3091     if (target_type & TARGET_SOCK_CLOEXEC) {
3092 #if defined(SOCK_CLOEXEC)
3093         host_type |= SOCK_CLOEXEC;
3094 #else
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     if (target_type & TARGET_SOCK_NONBLOCK) {
3099 #if defined(SOCK_NONBLOCK)
3100         host_type |= SOCK_NONBLOCK;
3101 #elif !defined(O_NONBLOCK)
3102         return -TARGET_EINVAL;
3103 #endif
3104     }
3105     *type = host_type;
3106     return 0;
3107 }
3108 
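/*
 * A minimal usage sketch of target_to_host_sock_type() above, with a
 * hypothetical caller: the low TARGET_SOCK_TYPE_MASK bits carry the base
 * type, while the CLOEXEC/NONBLOCK bits are translated to their host
 * equivalents when the host defines them.
 */
#if 0
static void sketch_sock_type_example(void)
{
    int type = TARGET_SOCK_STREAM | TARGET_SOCK_NONBLOCK;

    if (target_to_host_sock_type(&type) == 0) {
        /*
         * On hosts with SOCK_NONBLOCK, type now equals
         * SOCK_STREAM | SOCK_NONBLOCK.  On hosts that only have
         * O_NONBLOCK, the flag is applied after socket creation by
         * sock_flags_fixup() below via fcntl(F_SETFL).
         */
    }
}
#endif
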
3109 /* Try to emulate socket type flags after socket creation.  */
3110 static int sock_flags_fixup(int fd, int target_type)
3111 {
3112 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3113     if (target_type & TARGET_SOCK_NONBLOCK) {
3114         int flags = fcntl(fd, F_GETFL);
3115         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3116             close(fd);
3117             return -TARGET_EINVAL;
3118         }
3119     }
3120 #endif
3121     return fd;
3122 }
3123 
3124 /* do_socket() Must return target values and target errnos. */
3125 static abi_long do_socket(int domain, int type, int protocol)
3126 {
3127     int target_type = type;
3128     int ret;
3129 
3130     ret = target_to_host_sock_type(&type);
3131     if (ret) {
3132         return ret;
3133     }
3134 
3135     if (domain == PF_NETLINK && !(
3136 #ifdef CONFIG_RTNETLINK
3137          protocol == NETLINK_ROUTE ||
3138 #endif
3139          protocol == NETLINK_KOBJECT_UEVENT ||
3140          protocol == NETLINK_AUDIT)) {
3141         return -TARGET_EPROTONOSUPPORT;
3142     }
3143 
3144     if (domain == AF_PACKET ||
3145         (domain == AF_INET && type == SOCK_PACKET)) {
3146         protocol = tswap16(protocol);
3147     }
3148 
3149     ret = get_errno(socket(domain, type, protocol));
3150     if (ret >= 0) {
3151         ret = sock_flags_fixup(ret, target_type);
3152         if (type == SOCK_PACKET) {
3153             /* Handle an obsolete case:
3154              * if the socket type is SOCK_PACKET, bind by name.
3155              */
3156             fd_trans_register(ret, &target_packet_trans);
3157         } else if (domain == PF_NETLINK) {
3158             switch (protocol) {
3159 #ifdef CONFIG_RTNETLINK
3160             case NETLINK_ROUTE:
3161                 fd_trans_register(ret, &target_netlink_route_trans);
3162                 break;
3163 #endif
3164             case NETLINK_KOBJECT_UEVENT:
3165                 /* nothing to do: messages are strings */
3166                 break;
3167             case NETLINK_AUDIT:
3168                 fd_trans_register(ret, &target_netlink_audit_trans);
3169                 break;
3170             default:
3171                 g_assert_not_reached();
3172             }
3173         }
3174     }
3175     return ret;
3176 }
3177 
3178 /* do_bind() Must return target values and target errnos. */
3179 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3180                         socklen_t addrlen)
3181 {
3182     void *addr;
3183     abi_long ret;
3184 
3185     if ((int)addrlen < 0) {
3186         return -TARGET_EINVAL;
3187     }
3188 
3189     addr = alloca(addrlen+1);
3190 
3191     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3192     if (ret)
3193         return ret;
3194 
3195     return get_errno(bind(sockfd, addr, addrlen));
3196 }
3197 
3198 /* do_connect() Must return target values and target errnos. */
3199 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3200                            socklen_t addrlen)
3201 {
3202     void *addr;
3203     abi_long ret;
3204 
3205     if ((int)addrlen < 0) {
3206         return -TARGET_EINVAL;
3207     }
3208 
3209     addr = alloca(addrlen+1);
3210 
3211     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3212     if (ret)
3213         return ret;
3214 
3215     return get_errno(safe_connect(sockfd, addr, addrlen));
3216 }
3217 
3218 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3219 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3220                                       int flags, int send)
3221 {
3222     abi_long ret, len;
3223     struct msghdr msg;
3224     abi_ulong count;
3225     struct iovec *vec;
3226     abi_ulong target_vec;
3227 
3228     if (msgp->msg_name) {
3229         msg.msg_namelen = tswap32(msgp->msg_namelen);
3230         msg.msg_name = alloca(msg.msg_namelen+1);
3231         ret = target_to_host_sockaddr(fd, msg.msg_name,
3232                                       tswapal(msgp->msg_name),
3233                                       msg.msg_namelen);
3234         if (ret == -TARGET_EFAULT) {
3235             /* For connected sockets, msg_name and msg_namelen must
3236              * be ignored, so returning EFAULT immediately is wrong.
3237              * Instead, pass a bad msg_name to the host kernel, and
3238              * let it decide whether to return EFAULT or not.
3239              */
3240             msg.msg_name = (void *)-1;
3241         } else if (ret) {
3242             goto out2;
3243         }
3244     } else {
3245         msg.msg_name = NULL;
3246         msg.msg_namelen = 0;
3247     }
3248     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3249     msg.msg_control = alloca(msg.msg_controllen);
3250     memset(msg.msg_control, 0, msg.msg_controllen);
3251 
3252     msg.msg_flags = tswap32(msgp->msg_flags);
3253 
3254     count = tswapal(msgp->msg_iovlen);
3255     target_vec = tswapal(msgp->msg_iov);
3256 
3257     if (count > IOV_MAX) {
3258         /* sendmsg/recvmsg return a different errno for this condition than
3259          * readv/writev do, so we must catch it here before lock_iovec() does.
3260          */
3261         ret = -TARGET_EMSGSIZE;
3262         goto out2;
3263     }
3264 
3265     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3266                      target_vec, count, send);
3267     if (vec == NULL) {
3268         ret = -host_to_target_errno(errno);
3269         goto out2;
3270     }
3271     msg.msg_iovlen = count;
3272     msg.msg_iov = vec;
3273 
3274     if (send) {
3275         if (fd_trans_target_to_host_data(fd)) {
3276             void *host_msg;
3277 
3278             host_msg = g_malloc(msg.msg_iov->iov_len);
3279             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3280             ret = fd_trans_target_to_host_data(fd)(host_msg,
3281                                                    msg.msg_iov->iov_len);
3282             if (ret >= 0) {
3283                 msg.msg_iov->iov_base = host_msg;
3284                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3285             }
3286             g_free(host_msg);
3287         } else {
3288             ret = target_to_host_cmsg(&msg, msgp);
3289             if (ret == 0) {
3290                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3291             }
3292         }
3293     } else {
3294         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3295         if (!is_error(ret)) {
3296             len = ret;
3297             if (fd_trans_host_to_target_data(fd)) {
3298                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3299                                                MIN(msg.msg_iov->iov_len, len));
3300             } else {
3301                 ret = host_to_target_cmsg(msgp, &msg);
3302             }
3303             if (!is_error(ret)) {
3304                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3305                 msgp->msg_flags = tswap32(msg.msg_flags);
3306                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3307                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3308                                     msg.msg_name, msg.msg_namelen);
3309                     if (ret) {
3310                         goto out;
3311                     }
3312                 }
3313 
3314                 ret = len;
3315             }
3316         }
3317     }
3318 
3319 out:
3320     unlock_iovec(vec, target_vec, count, !send);
3321 out2:
3322     return ret;
3323 }
3324 
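/*
 * A minimal sketch of one reason do_sendrecvmsg_locked() above sizes the
 * host control buffer as twice the guest msg_controllen (an assumption,
 * not stated in the code): host cmsg headers and alignment can be larger
 * than the target's, so translated ancillary data may need more room than
 * the guest reserved.  The figures below are illustrative.
 */
#if 0
static void sketch_cmsg_growth(void)
{
    /* One SCM_RIGHTS fd on a typical 64-bit host: 16-byte header plus
     * 8-byte-aligned payload, i.e. CMSG_SPACE(4) is usually 24 bytes,
     * while a 32-bit guest may have reserved only 12 + 4 = 16 bytes. */
    size_t host_space = CMSG_SPACE(sizeof(int));
    (void)host_space;
}
#endif
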
3325 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3326                                int flags, int send)
3327 {
3328     abi_long ret;
3329     struct target_msghdr *msgp;
3330 
3331     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3332                           msgp,
3333                           target_msg,
3334                           send ? 1 : 0)) {
3335         return -TARGET_EFAULT;
3336     }
3337     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3338     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3339     return ret;
3340 }
3341 
3342 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3343  * so it might not have this *mmsg-specific flag either.
3344  */
3345 #ifndef MSG_WAITFORONE
3346 #define MSG_WAITFORONE 0x10000
3347 #endif
3348 
3349 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3350                                 unsigned int vlen, unsigned int flags,
3351                                 int send)
3352 {
3353     struct target_mmsghdr *mmsgp;
3354     abi_long ret = 0;
3355     int i;
3356 
3357     if (vlen > UIO_MAXIOV) {
3358         vlen = UIO_MAXIOV;
3359     }
3360 
3361     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3362     if (!mmsgp) {
3363         return -TARGET_EFAULT;
3364     }
3365 
3366     for (i = 0; i < vlen; i++) {
3367         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3368         if (is_error(ret)) {
3369             break;
3370         }
3371         mmsgp[i].msg_len = tswap32(ret);
3372         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3373         if (flags & MSG_WAITFORONE) {
3374             flags |= MSG_DONTWAIT;
3375         }
3376     }
3377 
3378     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3379 
3380     /* Return number of datagrams sent if we sent any at all;
3381      * otherwise return the error.
3382      */
3383     if (i) {
3384         return i;
3385     }
3386     return ret;
3387 }
3388 
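/*
 * A minimal sketch of the return convention implemented by
 * do_sendrecvmmsg() above: once at least one message has been processed,
 * a later failure is reported as a short count rather than an error.
 * The helper name sketch_mmsg_result is hypothetical.
 */
#if 0
static abi_long sketch_mmsg_result(abi_long ret, int processed)
{
    return processed ? processed : ret;    /* the count wins over the error */
}
#endif
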
3389 /* do_accept4() Must return target values and target errnos. */
3390 static abi_long do_accept4(int fd, abi_ulong target_addr,
3391                            abi_ulong target_addrlen_addr, int flags)
3392 {
3393     socklen_t addrlen, ret_addrlen;
3394     void *addr;
3395     abi_long ret;
3396     int host_flags;
3397 
3398     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3399 
3400     if (target_addr == 0) {
3401         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3402     }
3403 
3404     /* Linux returns EFAULT if the addrlen pointer is invalid */
3405     if (get_user_u32(addrlen, target_addrlen_addr))
3406         return -TARGET_EFAULT;
3407 
3408     if ((int)addrlen < 0) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3413         return -TARGET_EFAULT;
3414     }
3415 
3416     addr = alloca(addrlen);
3417 
3418     ret_addrlen = addrlen;
3419     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3420     if (!is_error(ret)) {
3421         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3422         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3423             ret = -TARGET_EFAULT;
3424         }
3425     }
3426     return ret;
3427 }
3428 
3429 /* do_getpeername() Must return target values and target errnos. */
3430 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3431                                abi_ulong target_addrlen_addr)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436 
3437     if (get_user_u32(addrlen, target_addrlen_addr))
3438         return -TARGET_EFAULT;
3439 
3440     if ((int)addrlen < 0) {
3441         return -TARGET_EINVAL;
3442     }
3443 
3444     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3445         return -TARGET_EFAULT;
3446     }
3447 
3448     addr = alloca(addrlen);
3449 
3450     ret_addrlen = addrlen;
3451     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3452     if (!is_error(ret)) {
3453         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3454         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3455             ret = -TARGET_EFAULT;
3456         }
3457     }
3458     return ret;
3459 }
3460 
3461 /* do_getsockname() Must return target values and target errnos. */
3462 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3463                                abi_ulong target_addrlen_addr)
3464 {
3465     socklen_t addrlen, ret_addrlen;
3466     void *addr;
3467     abi_long ret;
3468 
3469     if (get_user_u32(addrlen, target_addrlen_addr))
3470         return -TARGET_EFAULT;
3471 
3472     if ((int)addrlen < 0) {
3473         return -TARGET_EINVAL;
3474     }
3475 
3476     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3477         return -TARGET_EFAULT;
3478     }
3479 
3480     addr = alloca(addrlen);
3481 
3482     ret_addrlen = addrlen;
3483     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3484     if (!is_error(ret)) {
3485         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3486         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3487             ret = -TARGET_EFAULT;
3488         }
3489     }
3490     return ret;
3491 }
3492 
3493 /* do_socketpair() Must return target values and target errnos. */
3494 static abi_long do_socketpair(int domain, int type, int protocol,
3495                               abi_ulong target_tab_addr)
3496 {
3497     int tab[2];
3498     abi_long ret;
3499 
3500     target_to_host_sock_type(&type);
3501 
3502     ret = get_errno(socketpair(domain, type, protocol, tab));
3503     if (!is_error(ret)) {
3504         if (put_user_s32(tab[0], target_tab_addr)
3505             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3506             ret = -TARGET_EFAULT;
3507     }
3508     return ret;
3509 }
3510 
3511 /* do_sendto() Must return target values and target errnos. */
3512 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3513                           abi_ulong target_addr, socklen_t addrlen)
3514 {
3515     void *addr;
3516     void *host_msg;
3517     void *copy_msg = NULL;
3518     abi_long ret;
3519 
3520     if ((int)addrlen < 0) {
3521         return -TARGET_EINVAL;
3522     }
3523 
3524     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3525     if (!host_msg)
3526         return -TARGET_EFAULT;
3527     if (fd_trans_target_to_host_data(fd)) {
3528         copy_msg = host_msg;
3529         host_msg = g_malloc(len);
3530         memcpy(host_msg, copy_msg, len);
3531         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3532         if (ret < 0) {
3533             goto fail;
3534         }
3535     }
3536     if (target_addr) {
3537         addr = alloca(addrlen+1);
3538         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3539         if (ret) {
3540             goto fail;
3541         }
3542         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3543     } else {
3544         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3545     }
3546 fail:
3547     if (copy_msg) {
3548         g_free(host_msg);
3549         host_msg = copy_msg;
3550     }
3551     unlock_user(host_msg, msg, 0);
3552     return ret;
3553 }
3554 
3555 /* do_recvfrom() Must return target values and target errnos. */
3556 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3557                             abi_ulong target_addr,
3558                             abi_ulong target_addrlen)
3559 {
3560     socklen_t addrlen, ret_addrlen;
3561     void *addr;
3562     void *host_msg;
3563     abi_long ret;
3564 
3565     if (!msg) {
3566         host_msg = NULL;
3567     } else {
3568         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3569         if (!host_msg) {
3570             return -TARGET_EFAULT;
3571         }
3572     }
3573     if (target_addr) {
3574         if (get_user_u32(addrlen, target_addrlen)) {
3575             ret = -TARGET_EFAULT;
3576             goto fail;
3577         }
3578         if ((int)addrlen < 0) {
3579             ret = -TARGET_EINVAL;
3580             goto fail;
3581         }
3582         addr = alloca(addrlen);
3583         ret_addrlen = addrlen;
3584         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3585                                       addr, &ret_addrlen));
3586     } else {
3587         addr = NULL; /* To keep compiler quiet.  */
3588         addrlen = 0; /* To keep compiler quiet.  */
3589         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3590     }
3591     if (!is_error(ret)) {
3592         if (fd_trans_host_to_target_data(fd)) {
3593             abi_long trans;
3594             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3595             if (is_error(trans)) {
3596                 ret = trans;
3597                 goto fail;
3598             }
3599         }
3600         if (target_addr) {
3601             host_to_target_sockaddr(target_addr, addr,
3602                                     MIN(addrlen, ret_addrlen));
3603             if (put_user_u32(ret_addrlen, target_addrlen)) {
3604                 ret = -TARGET_EFAULT;
3605                 goto fail;
3606             }
3607         }
3608         unlock_user(host_msg, msg, len);
3609     } else {
3610 fail:
3611         unlock_user(host_msg, msg, 0);
3612     }
3613     return ret;
3614 }
3615 
3616 #ifdef TARGET_NR_socketcall
3617 /* do_socketcall() must return target values and target errnos. */
3618 static abi_long do_socketcall(int num, abi_ulong vptr)
3619 {
3620     static const unsigned nargs[] = { /* number of arguments per operation */
3621         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3622         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3623         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3624         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3625         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3626         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3628         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3629         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3631         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3633         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3634         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3636         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3638         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3639         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3641     };
3642     abi_long a[6]; /* max 6 args */
3643     unsigned i;
3644 
3645     /* check the range of the first argument num */
3646     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3647     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3648         return -TARGET_EINVAL;
3649     }
3650     /* ensure we have space for args */
3651     if (nargs[num] > ARRAY_SIZE(a)) {
3652         return -TARGET_EINVAL;
3653     }
3654     /* collect the arguments in a[] according to nargs[] */
3655     for (i = 0; i < nargs[num]; ++i) {
3656         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3657             return -TARGET_EFAULT;
3658         }
3659     }
3660     /* now that we have the args, invoke the appropriate underlying function */
3661     switch (num) {
3662     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3663         return do_socket(a[0], a[1], a[2]);
3664     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3665         return do_bind(a[0], a[1], a[2]);
3666     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3667         return do_connect(a[0], a[1], a[2]);
3668     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3669         return get_errno(listen(a[0], a[1]));
3670     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3671         return do_accept4(a[0], a[1], a[2], 0);
3672     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3673         return do_getsockname(a[0], a[1], a[2]);
3674     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3675         return do_getpeername(a[0], a[1], a[2]);
3676     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3677         return do_socketpair(a[0], a[1], a[2], a[3]);
3678     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3679         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3680     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3681         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3682     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3683         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3684     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3685         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3686     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3687         return get_errno(shutdown(a[0], a[1]));
3688     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3689         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3690     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3691         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3692     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3693         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3694     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3695         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3696     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3697         return do_accept4(a[0], a[1], a[2], a[3]);
3698     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3699         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3700     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3701         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3702     default:
3703         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3704         return -TARGET_EINVAL;
3705     }
3706 }
3707 #endif
3708 
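/*
 * A minimal guest-side sketch of the calling convention decoded by
 * do_socketcall() above: the guest passes an operation number plus a
 * pointer to an array of abi_long slots, and nargs[] tells the
 * multiplexer how many slots to fetch with get_user_ual().  The helper
 * below is hypothetical and only illustrates the TARGET_SYS_CONNECT case.
 */
#if 0
static abi_long sketch_connect_via_socketcall(abi_ulong vptr, abi_long sockfd,
                                              abi_ulong addr, socklen_t addrlen)
{
    /* Layout at vptr: a[0] = sockfd, a[1] = sockaddr pointer, a[2] = addrlen */
    if (put_user_ual(sockfd, vptr) ||
        put_user_ual(addr, vptr + sizeof(abi_long)) ||
        put_user_ual(addrlen, vptr + 2 * sizeof(abi_long))) {
        return -TARGET_EFAULT;
    }
    return do_socketcall(TARGET_SYS_CONNECT, vptr);
}
#endif
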
3709 #define N_SHM_REGIONS	32
3710 
3711 static struct shm_region {
3712     abi_ulong start;
3713     abi_ulong size;
3714     bool in_use;
3715 } shm_regions[N_SHM_REGIONS];
3716 
3717 #ifndef TARGET_SEMID64_DS
3718 /* asm-generic version of this struct */
3719 struct target_semid64_ds
3720 {
3721   struct target_ipc_perm sem_perm;
3722   abi_ulong sem_otime;
3723 #if TARGET_ABI_BITS == 32
3724   abi_ulong __unused1;
3725 #endif
3726   abi_ulong sem_ctime;
3727 #if TARGET_ABI_BITS == 32
3728   abi_ulong __unused2;
3729 #endif
3730   abi_ulong sem_nsems;
3731   abi_ulong __unused3;
3732   abi_ulong __unused4;
3733 };
3734 #endif
3735 
3736 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3737                                                abi_ulong target_addr)
3738 {
3739     struct target_ipc_perm *target_ip;
3740     struct target_semid64_ds *target_sd;
3741 
3742     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3743         return -TARGET_EFAULT;
3744     target_ip = &(target_sd->sem_perm);
3745     host_ip->__key = tswap32(target_ip->__key);
3746     host_ip->uid = tswap32(target_ip->uid);
3747     host_ip->gid = tswap32(target_ip->gid);
3748     host_ip->cuid = tswap32(target_ip->cuid);
3749     host_ip->cgid = tswap32(target_ip->cgid);
3750 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3751     host_ip->mode = tswap32(target_ip->mode);
3752 #else
3753     host_ip->mode = tswap16(target_ip->mode);
3754 #endif
3755 #if defined(TARGET_PPC)
3756     host_ip->__seq = tswap32(target_ip->__seq);
3757 #else
3758     host_ip->__seq = tswap16(target_ip->__seq);
3759 #endif
3760     unlock_user_struct(target_sd, target_addr, 0);
3761     return 0;
3762 }
3763 
3764 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3765                                                struct ipc_perm *host_ip)
3766 {
3767     struct target_ipc_perm *target_ip;
3768     struct target_semid64_ds *target_sd;
3769 
3770     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3771         return -TARGET_EFAULT;
3772     target_ip = &(target_sd->sem_perm);
3773     target_ip->__key = tswap32(host_ip->__key);
3774     target_ip->uid = tswap32(host_ip->uid);
3775     target_ip->gid = tswap32(host_ip->gid);
3776     target_ip->cuid = tswap32(host_ip->cuid);
3777     target_ip->cgid = tswap32(host_ip->cgid);
3778 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3779     target_ip->mode = tswap32(host_ip->mode);
3780 #else
3781     target_ip->mode = tswap16(host_ip->mode);
3782 #endif
3783 #if defined(TARGET_PPC)
3784     target_ip->__seq = tswap32(host_ip->__seq);
3785 #else
3786     target_ip->__seq = tswap16(host_ip->__seq);
3787 #endif
3788     unlock_user_struct(target_sd, target_addr, 1);
3789     return 0;
3790 }
3791 
3792 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3793                                                abi_ulong target_addr)
3794 {
3795     struct target_semid64_ds *target_sd;
3796 
3797     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798         return -TARGET_EFAULT;
3799     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3800         return -TARGET_EFAULT;
3801     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3802     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3803     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3804     unlock_user_struct(target_sd, target_addr, 0);
3805     return 0;
3806 }
3807 
3808 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3809                                                struct semid_ds *host_sd)
3810 {
3811     struct target_semid64_ds *target_sd;
3812 
3813     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3814         return -TARGET_EFAULT;
3815     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3816         return -TARGET_EFAULT;
3817     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3818     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3819     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3820     unlock_user_struct(target_sd, target_addr, 1);
3821     return 0;
3822 }
3823 
3824 struct target_seminfo {
3825     int semmap;
3826     int semmni;
3827     int semmns;
3828     int semmnu;
3829     int semmsl;
3830     int semopm;
3831     int semume;
3832     int semusz;
3833     int semvmx;
3834     int semaem;
3835 };
3836 
3837 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3838                                               struct seminfo *host_seminfo)
3839 {
3840     struct target_seminfo *target_seminfo;
3841     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3842         return -TARGET_EFAULT;
3843     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3844     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3845     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3846     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3847     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3848     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3849     __put_user(host_seminfo->semume, &target_seminfo->semume);
3850     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3851     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3852     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3853     unlock_user_struct(target_seminfo, target_addr, 1);
3854     return 0;
3855 }
3856 
3857 union semun {
3858 	int val;
3859 	struct semid_ds *buf;
3860 	unsigned short *array;
3861 	struct seminfo *__buf;
3862 };
3863 
3864 union target_semun {
3865 	int val;
3866 	abi_ulong buf;
3867 	abi_ulong array;
3868 	abi_ulong __buf;
3869 };
3870 
3871 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3872                                                abi_ulong target_addr)
3873 {
3874     int nsems;
3875     unsigned short *array;
3876     union semun semun;
3877     struct semid_ds semid_ds;
3878     int i, ret;
3879 
3880     semun.buf = &semid_ds;
3881 
3882     ret = semctl(semid, 0, IPC_STAT, semun);
3883     if (ret == -1)
3884         return get_errno(ret);
3885 
3886     nsems = semid_ds.sem_nsems;
3887 
3888     *host_array = g_try_new(unsigned short, nsems);
3889     if (!*host_array) {
3890         return -TARGET_ENOMEM;
3891     }
3892     array = lock_user(VERIFY_READ, target_addr,
3893                       nsems*sizeof(unsigned short), 1);
3894     if (!array) {
3895         g_free(*host_array);
3896         return -TARGET_EFAULT;
3897     }
3898 
3899     for(i=0; i<nsems; i++) {
3900         __get_user((*host_array)[i], &array[i]);
3901     }
3902     unlock_user(array, target_addr, 0);
3903 
3904     return 0;
3905 }
3906 
3907 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3908                                                unsigned short **host_array)
3909 {
3910     int nsems;
3911     unsigned short *array;
3912     union semun semun;
3913     struct semid_ds semid_ds;
3914     int i, ret;
3915 
3916     semun.buf = &semid_ds;
3917 
3918     ret = semctl(semid, 0, IPC_STAT, semun);
3919     if (ret == -1)
3920         return get_errno(ret);
3921 
3922     nsems = semid_ds.sem_nsems;
3923 
3924     array = lock_user(VERIFY_WRITE, target_addr,
3925                       nsems*sizeof(unsigned short), 0);
3926     if (!array)
3927         return -TARGET_EFAULT;
3928 
3929     for(i=0; i<nsems; i++) {
3930         __put_user((*host_array)[i], &array[i]);
3931     }
3932     g_free(*host_array);
3933     unlock_user(array, target_addr, 1);
3934 
3935     return 0;
3936 }
3937 
3938 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3939                                  abi_ulong target_arg)
3940 {
3941     union target_semun target_su = { .buf = target_arg };
3942     union semun arg;
3943     struct semid_ds dsarg;
3944     unsigned short *array = NULL;
3945     struct seminfo seminfo;
3946     abi_long ret = -TARGET_EINVAL;
3947     abi_long err;
3948     cmd &= 0xff;
3949 
3950     switch( cmd ) {
3951 	case GETVAL:
3952 	case SETVAL:
3953             /* In 64 bit cross-endian situations, we will erroneously pick up
3954              * the wrong half of the union for the "val" element.  To rectify
3955              * this, the entire 8-byte structure is byteswapped, followed by
3956              * a swap of the 4 byte val field. In other cases, the data is
3957              * already in proper host byte order. */
3958 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3959 		target_su.buf = tswapal(target_su.buf);
3960 		arg.val = tswap32(target_su.val);
3961 	    } else {
3962 		arg.val = target_su.val;
3963 	    }
3964             ret = get_errno(semctl(semid, semnum, cmd, arg));
3965             break;
3966 	case GETALL:
3967 	case SETALL:
3968             err = target_to_host_semarray(semid, &array, target_su.array);
3969             if (err)
3970                 return err;
3971             arg.array = array;
3972             ret = get_errno(semctl(semid, semnum, cmd, arg));
3973             err = host_to_target_semarray(semid, target_su.array, &array);
3974             if (err)
3975                 return err;
3976             break;
3977 	case IPC_STAT:
3978 	case IPC_SET:
3979 	case SEM_STAT:
3980             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3981             if (err)
3982                 return err;
3983             arg.buf = &dsarg;
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3986             if (err)
3987                 return err;
3988             break;
3989 	case IPC_INFO:
3990 	case SEM_INFO:
3991             arg.__buf = &seminfo;
3992             ret = get_errno(semctl(semid, semnum, cmd, arg));
3993             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3994             if (err)
3995                 return err;
3996             break;
3997 	case IPC_RMID:
3998 	case GETPID:
3999 	case GETNCNT:
4000 	case GETZCNT:
4001             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4002             break;
4003     }
4004 
4005     return ret;
4006 }
4007 
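/*
 * A minimal worked example of the GETVAL/SETVAL byte-order fix in
 * do_semctl() above, for the case where the target semun is 8 bytes wide
 * but "val" occupies only 4: swapping the whole abi_ulong first moves the
 * 32-bit value into the half the host reads, and tswap32() then corrects
 * its internal byte order.  The helper name sketch_semun_val is
 * hypothetical.
 */
#if 0
static int sketch_semun_val(union target_semun target_su)
{
    if (sizeof(target_su.val) != sizeof(target_su.buf)) {
        target_su.buf = tswapal(target_su.buf);    /* realign the halves */
        return tswap32(target_su.val);             /* fix the 32-bit order */
    }
    return target_su.val;                          /* already host order */
}
#endif
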
4008 struct target_sembuf {
4009     unsigned short sem_num;
4010     short sem_op;
4011     short sem_flg;
4012 };
4013 
4014 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4015                                              abi_ulong target_addr,
4016                                              unsigned nsops)
4017 {
4018     struct target_sembuf *target_sembuf;
4019     int i;
4020 
4021     target_sembuf = lock_user(VERIFY_READ, target_addr,
4022                               nsops*sizeof(struct target_sembuf), 1);
4023     if (!target_sembuf)
4024         return -TARGET_EFAULT;
4025 
4026     for(i=0; i<nsops; i++) {
4027         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4028         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4029         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4030     }
4031 
4032     unlock_user(target_sembuf, target_addr, 0);
4033 
4034     return 0;
4035 }
4036 
4037 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4038     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4039 
4040 /*
4041  * This macro is required to handle the s390 variant, which passes the
4042  * arguments in a different order from the default.
4043  */
4044 #ifdef __s390x__
4045 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4046   (__nsops), (__timeout), (__sops)
4047 #else
4048 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4049   (__nsops), 0, (__sops), (__timeout)
4050 #endif
4051 
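/*
 * A minimal sketch of how SEMTIMEDOP_IPC_ARGS expands inside the
 * safe_ipc() fallback in do_semtimedop() below; the surrounding variable
 * names are those of that function.
 */
#if 0
/* Default (six-argument) layout: */
ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts));
/* s390x (five-argument) layout, timeout in the third slot: */
ret = get_errno(safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops));
#endif
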
4052 static inline abi_long do_semtimedop(int semid,
4053                                      abi_long ptr,
4054                                      unsigned nsops,
4055                                      abi_long timeout, bool time64)
4056 {
4057     struct sembuf *sops;
4058     struct timespec ts, *pts = NULL;
4059     abi_long ret;
4060 
4061     if (timeout) {
4062         pts = &ts;
4063         if (time64) {
4064             if (target_to_host_timespec64(pts, timeout)) {
4065                 return -TARGET_EFAULT;
4066             }
4067         } else {
4068             if (target_to_host_timespec(pts, timeout)) {
4069                 return -TARGET_EFAULT;
4070             }
4071         }
4072     }
4073 
4074     if (nsops > TARGET_SEMOPM) {
4075         return -TARGET_E2BIG;
4076     }
4077 
4078     sops = g_new(struct sembuf, nsops);
4079 
4080     if (target_to_host_sembuf(sops, ptr, nsops)) {
4081         g_free(sops);
4082         return -TARGET_EFAULT;
4083     }
4084 
4085     ret = -TARGET_ENOSYS;
4086 #ifdef __NR_semtimedop
4087     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4088 #endif
4089 #ifdef __NR_ipc
4090     if (ret == -TARGET_ENOSYS) {
4091         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4092                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4093     }
4094 #endif
4095     g_free(sops);
4096     return ret;
4097 }
4098 #endif
4099 
4100 struct target_msqid_ds
4101 {
4102     struct target_ipc_perm msg_perm;
4103     abi_ulong msg_stime;
4104 #if TARGET_ABI_BITS == 32
4105     abi_ulong __unused1;
4106 #endif
4107     abi_ulong msg_rtime;
4108 #if TARGET_ABI_BITS == 32
4109     abi_ulong __unused2;
4110 #endif
4111     abi_ulong msg_ctime;
4112 #if TARGET_ABI_BITS == 32
4113     abi_ulong __unused3;
4114 #endif
4115     abi_ulong __msg_cbytes;
4116     abi_ulong msg_qnum;
4117     abi_ulong msg_qbytes;
4118     abi_ulong msg_lspid;
4119     abi_ulong msg_lrpid;
4120     abi_ulong __unused4;
4121     abi_ulong __unused5;
4122 };
4123 
4124 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4125                                                abi_ulong target_addr)
4126 {
4127     struct target_msqid_ds *target_md;
4128 
4129     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4130         return -TARGET_EFAULT;
4131     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4132         return -TARGET_EFAULT;
4133     host_md->msg_stime = tswapal(target_md->msg_stime);
4134     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4135     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4136     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4137     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4138     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4139     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4140     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4141     unlock_user_struct(target_md, target_addr, 0);
4142     return 0;
4143 }
4144 
4145 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4146                                                struct msqid_ds *host_md)
4147 {
4148     struct target_msqid_ds *target_md;
4149 
4150     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4151         return -TARGET_EFAULT;
4152     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4153         return -TARGET_EFAULT;
4154     target_md->msg_stime = tswapal(host_md->msg_stime);
4155     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4156     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4157     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4158     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4159     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4160     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4161     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4162     unlock_user_struct(target_md, target_addr, 1);
4163     return 0;
4164 }
4165 
4166 struct target_msginfo {
4167     int msgpool;
4168     int msgmap;
4169     int msgmax;
4170     int msgmnb;
4171     int msgmni;
4172     int msgssz;
4173     int msgtql;
4174     unsigned short int msgseg;
4175 };
4176 
4177 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4178                                               struct msginfo *host_msginfo)
4179 {
4180     struct target_msginfo *target_msginfo;
4181     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4182         return -TARGET_EFAULT;
4183     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4184     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4185     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4186     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4187     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4188     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4189     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4190     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4191     unlock_user_struct(target_msginfo, target_addr, 1);
4192     return 0;
4193 }
4194 
4195 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4196 {
4197     struct msqid_ds dsarg;
4198     struct msginfo msginfo;
4199     abi_long ret = -TARGET_EINVAL;
4200 
4201     cmd &= 0xff;
4202 
4203     switch (cmd) {
4204     case IPC_STAT:
4205     case IPC_SET:
4206     case MSG_STAT:
4207         if (target_to_host_msqid_ds(&dsarg,ptr))
4208             return -TARGET_EFAULT;
4209         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4210         if (host_to_target_msqid_ds(ptr,&dsarg))
4211             return -TARGET_EFAULT;
4212         break;
4213     case IPC_RMID:
4214         ret = get_errno(msgctl(msgid, cmd, NULL));
4215         break;
4216     case IPC_INFO:
4217     case MSG_INFO:
4218         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4219         if (host_to_target_msginfo(ptr, &msginfo))
4220             return -TARGET_EFAULT;
4221         break;
4222     }
4223 
4224     return ret;
4225 }
4226 
4227 struct target_msgbuf {
4228     abi_long mtype;
4229     char	mtext[1];
4230 };
4231 
4232 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4233                                  ssize_t msgsz, int msgflg)
4234 {
4235     struct target_msgbuf *target_mb;
4236     struct msgbuf *host_mb;
4237     abi_long ret = 0;
4238 
4239     if (msgsz < 0) {
4240         return -TARGET_EINVAL;
4241     }
4242 
4243     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4244         return -TARGET_EFAULT;
4245     host_mb = g_try_malloc(msgsz + sizeof(long));
4246     if (!host_mb) {
4247         unlock_user_struct(target_mb, msgp, 0);
4248         return -TARGET_ENOMEM;
4249     }
4250     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4251     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4252     ret = -TARGET_ENOSYS;
4253 #ifdef __NR_msgsnd
4254     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4255 #endif
4256 #ifdef __NR_ipc
4257     if (ret == -TARGET_ENOSYS) {
4258 #ifdef __s390x__
4259         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4260                                  host_mb));
4261 #else
4262         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4263                                  host_mb, 0));
4264 #endif
4265     }
4266 #endif
4267     g_free(host_mb);
4268     unlock_user_struct(target_mb, msgp, 0);
4269 
4270     return ret;
4271 }
4272 
4273 #ifdef __NR_ipc
4274 #if defined(__sparc__)
4275 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4277 #elif defined(__s390x__)
4278 /* The s390 sys_ipc variant has only five parameters.  */
4279 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4280     ((long int[]){(long int)__msgp, __msgtyp})
4281 #else
4282 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4283     ((long int[]){(long int)__msgp, __msgtyp}), 0
4284 #endif
4285 #endif
4286 
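/*
 * A minimal sketch of how MSGRCV_ARGS expands inside the safe_ipc()
 * fallback in do_msgrcv() below; the surrounding variable names are those
 * of that function.
 */
#if 0
/* SPARC: the final two arguments are passed directly. */
ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                         msgflg, host_mb, msgtyp));
/* Default: a two-element kludge array plus a trailing 0 (s390x omits the 0). */
ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
                         ((long int[]){(long int)host_mb, msgtyp}), 0));
#endif
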
4287 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4288                                  ssize_t msgsz, abi_long msgtyp,
4289                                  int msgflg)
4290 {
4291     struct target_msgbuf *target_mb;
4292     char *target_mtext;
4293     struct msgbuf *host_mb;
4294     abi_long ret = 0;
4295 
4296     if (msgsz < 0) {
4297         return -TARGET_EINVAL;
4298     }
4299 
4300     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4301         return -TARGET_EFAULT;
4302 
4303     host_mb = g_try_malloc(msgsz + sizeof(long));
4304     if (!host_mb) {
4305         ret = -TARGET_ENOMEM;
4306         goto end;
4307     }
4308     ret = -TARGET_ENOSYS;
4309 #ifdef __NR_msgrcv
4310     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4311 #endif
4312 #ifdef __NR_ipc
4313     if (ret == -TARGET_ENOSYS) {
4314         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4315                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4316     }
4317 #endif
4318 
4319     if (ret > 0) {
4320         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4321         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4322         if (!target_mtext) {
4323             ret = -TARGET_EFAULT;
4324             goto end;
4325         }
4326         memcpy(target_mb->mtext, host_mb->mtext, ret);
4327         unlock_user(target_mtext, target_mtext_addr, ret);
4328     }
4329 
4330     target_mb->mtype = tswapal(host_mb->mtype);
4331 
4332 end:
4333     if (target_mb)
4334         unlock_user_struct(target_mb, msgp, 1);
4335     g_free(host_mb);
4336     return ret;
4337 }
4338 
4339 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4340                                                abi_ulong target_addr)
4341 {
4342     struct target_shmid_ds *target_sd;
4343 
4344     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4345         return -TARGET_EFAULT;
4346     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4347         return -TARGET_EFAULT;
4348     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4349     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4350     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4351     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4352     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4353     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4354     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4355     unlock_user_struct(target_sd, target_addr, 0);
4356     return 0;
4357 }
4358 
4359 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4360                                                struct shmid_ds *host_sd)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4365         return -TARGET_EFAULT;
4366     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4367         return -TARGET_EFAULT;
4368     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 1);
4376     return 0;
4377 }
4378 
4379 struct  target_shminfo {
4380     abi_ulong shmmax;
4381     abi_ulong shmmin;
4382     abi_ulong shmmni;
4383     abi_ulong shmseg;
4384     abi_ulong shmall;
4385 };
4386 
4387 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4388                                               struct shminfo *host_shminfo)
4389 {
4390     struct target_shminfo *target_shminfo;
4391     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4392         return -TARGET_EFAULT;
4393     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4394     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4395     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4396     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4397     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4398     unlock_user_struct(target_shminfo, target_addr, 1);
4399     return 0;
4400 }
4401 
4402 struct target_shm_info {
4403     int used_ids;
4404     abi_ulong shm_tot;
4405     abi_ulong shm_rss;
4406     abi_ulong shm_swp;
4407     abi_ulong swap_attempts;
4408     abi_ulong swap_successes;
4409 };
4410 
4411 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4412                                                struct shm_info *host_shm_info)
4413 {
4414     struct target_shm_info *target_shm_info;
4415     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4416         return -TARGET_EFAULT;
4417     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4418     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4419     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4420     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4421     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4422     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4423     unlock_user_struct(target_shm_info, target_addr, 1);
4424     return 0;
4425 }
4426 
4427 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4428 {
4429     struct shmid_ds dsarg;
4430     struct shminfo shminfo;
4431     struct shm_info shm_info;
4432     abi_long ret = -TARGET_EINVAL;
4433 
4434     cmd &= 0xff;
4435 
4436     switch(cmd) {
4437     case IPC_STAT:
4438     case IPC_SET:
4439     case SHM_STAT:
4440         if (target_to_host_shmid_ds(&dsarg, buf))
4441             return -TARGET_EFAULT;
4442         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4443         if (host_to_target_shmid_ds(buf, &dsarg))
4444             return -TARGET_EFAULT;
4445         break;
4446     case IPC_INFO:
4447         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4448         if (host_to_target_shminfo(buf, &shminfo))
4449             return -TARGET_EFAULT;
4450         break;
4451     case SHM_INFO:
4452         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4453         if (host_to_target_shm_info(buf, &shm_info))
4454             return -TARGET_EFAULT;
4455         break;
4456     case IPC_RMID:
4457     case SHM_LOCK:
4458     case SHM_UNLOCK:
4459         ret = get_errno(shmctl(shmid, cmd, NULL));
4460         break;
4461     }
4462 
4463     return ret;
4464 }
4465 
4466 #ifndef TARGET_FORCE_SHMLBA
4467 /* For most architectures, SHMLBA is the same as the page size;
4468  * some architectures have larger values, in which case they should
4469  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4470  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4471  * and defining its own value for SHMLBA.
4472  *
4473  * The kernel also permits SHMLBA to be set by the architecture to a
4474  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4475  * this means that addresses are rounded to the large size if
4476  * SHM_RND is set but addresses not aligned to that size are not rejected
4477  * as long as they are at least page-aligned. Since the only architecture
4478  * which uses this is ia64 this code doesn't provide for that oddity.
4479  * which uses this is ia64, this code doesn't provide for that oddity.
4480 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4481 {
4482     return TARGET_PAGE_SIZE;
4483 }
4484 #endif
4485 
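/*
 * A minimal worked example of the alignment handling in do_shmat() below,
 * using an illustrative shmlba of 0x4000 (16 KiB): an shmaddr of 0x12345
 * is unaligned, so it is rounded down to 0x10000 when SHM_RND is set and
 * rejected with -TARGET_EINVAL otherwise.  The helper name
 * sketch_shm_align is hypothetical.
 */
#if 0
static abi_long sketch_shm_align(abi_ulong shmaddr, abi_ulong shmlba,
                                 int shmflg)
{
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);    /* round down to the SHMLBA grid */
        } else {
            return -TARGET_EINVAL;
        }
    }
    return shmaddr;
}
#endif
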
4486 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4487                                  int shmid, abi_ulong shmaddr, int shmflg)
4488 {
4489     CPUState *cpu = env_cpu(cpu_env);
4490     abi_long raddr;
4491     void *host_raddr;
4492     struct shmid_ds shm_info;
4493     int i,ret;
4494     abi_ulong shmlba;
4495 
4496     /* shmat pointers are always untagged */
4497 
4498     /* find out the length of the shared memory segment */
4499     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4500     if (is_error(ret)) {
4501         /* can't get length, bail out */
4502         return ret;
4503     }
4504 
4505     shmlba = target_shmlba(cpu_env);
4506 
4507     if (shmaddr & (shmlba - 1)) {
4508         if (shmflg & SHM_RND) {
4509             shmaddr &= ~(shmlba - 1);
4510         } else {
4511             return -TARGET_EINVAL;
4512         }
4513     }
4514     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4515         return -TARGET_EINVAL;
4516     }
4517 
4518     mmap_lock();
4519 
4520     /*
4521      * We're mapping shared memory, so ensure we generate code for parallel
4522      * execution and flush old translations.  This will work up to the level
4523      * supported by the host -- anything that requires EXCP_ATOMIC will not
4524      * be atomic with respect to an external process.
4525      */
4526     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4527         cpu->tcg_cflags |= CF_PARALLEL;
4528         tb_flush(cpu);
4529     }
4530 
4531     if (shmaddr)
4532         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4533     else {
4534         abi_ulong mmap_start;
4535 
4536         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4537         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4538 
4539         if (mmap_start == -1) {
4540             errno = ENOMEM;
4541             host_raddr = (void *)-1;
4542         } else
4543             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4544                                shmflg | SHM_REMAP);
4545     }
4546 
4547     if (host_raddr == (void *)-1) {
4548         mmap_unlock();
4549         return get_errno((long)host_raddr);
4550     }
4551     raddr = h2g((unsigned long)host_raddr);
4552 
4553     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4554                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4555                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4556 
4557     for (i = 0; i < N_SHM_REGIONS; i++) {
4558         if (!shm_regions[i].in_use) {
4559             shm_regions[i].in_use = true;
4560             shm_regions[i].start = raddr;
4561             shm_regions[i].size = shm_info.shm_segsz;
4562             break;
4563         }
4564     }
4565 
4566     mmap_unlock();
4567     return raddr;
4568 
4569 }
4570 
4571 static inline abi_long do_shmdt(abi_ulong shmaddr)
4572 {
4573     int i;
4574     abi_long rv;
4575 
4576     /* shmdt pointers are always untagged */
4577 
4578     mmap_lock();
4579 
4580     for (i = 0; i < N_SHM_REGIONS; ++i) {
4581         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4582             shm_regions[i].in_use = false;
4583             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4584             break;
4585         }
4586     }
4587     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4588 
4589     mmap_unlock();
4590 
4591     return rv;
4592 }
4593 
4594 #ifdef TARGET_NR_ipc
4595 /* ??? This only works with linear mappings.  */
4596 /* do_ipc() must return target values and target errnos. */
4597 static abi_long do_ipc(CPUArchState *cpu_env,
4598                        unsigned int call, abi_long first,
4599                        abi_long second, abi_long third,
4600                        abi_long ptr, abi_long fifth)
4601 {
4602     int version;
4603     abi_long ret = 0;
4604 
4605     version = call >> 16;
4606     call &= 0xffff;
4607 
4608     switch (call) {
4609     case IPCOP_semop:
4610         ret = do_semtimedop(first, ptr, second, 0, false);
4611         break;
4612     case IPCOP_semtimedop:
4613     /*
4614      * The s390 sys_ipc variant has only five parameters instead of six
4615      * (as in the default variant); the only difference is the handling of
4616      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4617      * to a struct timespec while the generic variant uses the fifth one.
4618      */
4619 #if defined(TARGET_S390X)
4620         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4621 #else
4622         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4623 #endif
4624         break;
4625 
4626     case IPCOP_semget:
4627         ret = get_errno(semget(first, second, third));
4628         break;
4629 
4630     case IPCOP_semctl: {
4631         /* The semun argument to semctl is passed by value, so dereference the
4632          * ptr argument. */
4633         abi_ulong atptr;
4634         get_user_ual(atptr, ptr);
4635         ret = do_semctl(first, second, third, atptr);
4636         break;
4637     }
4638 
4639     case IPCOP_msgget:
4640         ret = get_errno(msgget(first, second));
4641         break;
4642 
4643     case IPCOP_msgsnd:
4644         ret = do_msgsnd(first, ptr, second, third);
4645         break;
4646 
4647     case IPCOP_msgctl:
4648         ret = do_msgctl(first, second, ptr);
4649         break;
4650 
4651     case IPCOP_msgrcv:
4652         switch (version) {
4653         case 0:
4654             {
4655                 struct target_ipc_kludge {
4656                     abi_long msgp;
4657                     abi_long msgtyp;
4658                 } *tmp;
4659 
4660                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4661                     ret = -TARGET_EFAULT;
4662                     break;
4663                 }
4664 
4665                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4666 
4667                 unlock_user_struct(tmp, ptr, 0);
4668                 break;
4669             }
4670         default:
4671             ret = do_msgrcv(first, ptr, second, fifth, third);
4672         }
4673         break;
4674 
4675     case IPCOP_shmat:
4676         switch (version) {
4677         default:
4678         {
4679             abi_ulong raddr;
4680             raddr = do_shmat(cpu_env, first, ptr, second);
4681             if (is_error(raddr))
4682                 return get_errno(raddr);
4683             if (put_user_ual(raddr, third))
4684                 return -TARGET_EFAULT;
4685             break;
4686         }
4687         case 1:
4688             ret = -TARGET_EINVAL;
4689             break;
4690         }
4691         break;
4692     case IPCOP_shmdt:
4693         ret = do_shmdt(ptr);
4694         break;
4695 
4696     case IPCOP_shmget:
4697         /* IPC_* flag values are the same on all linux platforms */
4698         ret = get_errno(shmget(first, second, third));
4699         break;
4700 
4701     /* IPC_* and SHM_* command values are the same on all linux platforms */
4702     case IPCOP_shmctl:
4703         ret = do_shmctl(first, second, ptr);
4704         break;
4705     default:
4706         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4707                       call, version);
4708         ret = -TARGET_ENOSYS;
4709         break;
4710     }
4711     return ret;
4712 }
4713 #endif
4714 
4715 /* kernel structure types definitions */
4716 
4717 #define STRUCT(name, ...) STRUCT_ ## name,
4718 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4719 enum {
4720 #include "syscall_types.h"
4721 STRUCT_MAX
4722 };
4723 #undef STRUCT
4724 #undef STRUCT_SPECIAL
4725 
4726 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4727 #define STRUCT_SPECIAL(name)
4728 #include "syscall_types.h"
4729 #undef STRUCT
4730 #undef STRUCT_SPECIAL
4731 
4732 #define MAX_STRUCT_SIZE 4096
4733 
4734 #ifdef CONFIG_FIEMAP
4735 /* So fiemap access checks don't overflow on 32 bit systems.
4736  * This is very slightly smaller than the limit imposed by
4737  * the underlying kernel.
4738  */
4739 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4740                             / sizeof(struct fiemap_extent))
4741 
4742 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4743                                        int fd, int cmd, abi_long arg)
4744 {
4745     /* The parameter for this ioctl is a struct fiemap followed
4746      * by an array of struct fiemap_extent whose size is set
4747      * in fiemap->fm_extent_count. The array is filled in by the
4748      * ioctl.
4749      */
4750     int target_size_in, target_size_out;
4751     struct fiemap *fm;
4752     const argtype *arg_type = ie->arg_type;
4753     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4754     void *argptr, *p;
4755     abi_long ret;
4756     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4757     uint32_t outbufsz;
4758     int free_fm = 0;
4759 
4760     assert(arg_type[0] == TYPE_PTR);
4761     assert(ie->access == IOC_RW);
4762     arg_type++;
4763     target_size_in = thunk_type_size(arg_type, 0);
4764     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4765     if (!argptr) {
4766         return -TARGET_EFAULT;
4767     }
4768     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4769     unlock_user(argptr, arg, 0);
4770     fm = (struct fiemap *)buf_temp;
4771     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4772         return -TARGET_EINVAL;
4773     }
4774 
4775     outbufsz = sizeof (*fm) +
4776         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4777 
4778     if (outbufsz > MAX_STRUCT_SIZE) {
4779         /* We can't fit all the extents into the fixed size buffer.
4780          * Allocate one that is large enough and use it instead.
4781          */
4782         fm = g_try_malloc(outbufsz);
4783         if (!fm) {
4784             return -TARGET_ENOMEM;
4785         }
4786         memcpy(fm, buf_temp, sizeof(struct fiemap));
4787         free_fm = 1;
4788     }
4789     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4790     if (!is_error(ret)) {
4791         target_size_out = target_size_in;
4792         /* An extent_count of 0 means we were only counting the extents
4793          * so there are no structs to copy
4794          */
4795         if (fm->fm_extent_count != 0) {
4796             target_size_out += fm->fm_mapped_extents * extent_size;
4797         }
4798         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4799         if (!argptr) {
4800             ret = -TARGET_EFAULT;
4801         } else {
4802             /* Convert the struct fiemap */
4803             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4804             if (fm->fm_extent_count != 0) {
4805                 p = argptr + target_size_in;
4806                 /* ...and then all the struct fiemap_extents */
4807                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4808                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4809                                   THUNK_TARGET);
4810                     p += extent_size;
4811                 }
4812             }
4813             unlock_user(argptr, arg, target_size_out);
4814         }
4815     }
4816     if (free_fm) {
4817         g_free(fm);
4818     }
4819     return ret;
4820 }
4821 #endif
4822 
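     /*
      * Handle the struct ifconf ioctl (SIOCGIFCONF): the target's ifconf
      * header and its ifreq buffer pointer/length are converted to host
      * form, the host ioctl is issued into a (possibly enlarged) temporary
      * buffer, and the resulting ifreq entries are converted back into the
      * target layout.
      */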
4823 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4824                                 int fd, int cmd, abi_long arg)
4825 {
4826     const argtype *arg_type = ie->arg_type;
4827     int target_size;
4828     void *argptr;
4829     int ret;
4830     struct ifconf *host_ifconf;
4831     uint32_t outbufsz;
4832     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4833     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4834     int target_ifreq_size;
4835     int nb_ifreq;
4836     int free_buf = 0;
4837     int i;
4838     int target_ifc_len;
4839     abi_long target_ifc_buf;
4840     int host_ifc_len;
4841     char *host_ifc_buf;
4842 
4843     assert(arg_type[0] == TYPE_PTR);
4844     assert(ie->access == IOC_RW);
4845 
4846     arg_type++;
4847     target_size = thunk_type_size(arg_type, 0);
4848 
4849     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4850     if (!argptr)
4851         return -TARGET_EFAULT;
4852     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4853     unlock_user(argptr, arg, 0);
4854 
4855     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4856     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4857     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4858 
4859     if (target_ifc_buf != 0) {
4860         target_ifc_len = host_ifconf->ifc_len;
4861         nb_ifreq = target_ifc_len / target_ifreq_size;
4862         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4863 
4864         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4865         if (outbufsz > MAX_STRUCT_SIZE) {
4866             /*
4867              * We can't fit all the ifreq entries into the fixed size buffer.
4868              * Allocate one that is large enough and use it instead.
4869              */
4870             host_ifconf = malloc(outbufsz);
4871             if (!host_ifconf) {
4872                 return -TARGET_ENOMEM;
4873             }
4874             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4875             free_buf = 1;
4876         }
4877         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4878 
4879         host_ifconf->ifc_len = host_ifc_len;
4880     } else {
4881         host_ifc_buf = NULL;
4882     }
4883     host_ifconf->ifc_buf = host_ifc_buf;
4884 
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4886     if (!is_error(ret)) {
4887         /* convert host ifc_len to target ifc_len */
4888 
4889         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4890         target_ifc_len = nb_ifreq * target_ifreq_size;
4891         host_ifconf->ifc_len = target_ifc_len;
4892 
4893         /* restore target ifc_buf */
4894 
4895         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4896 
4897         /* copy struct ifconf to target user */
4898 
4899         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4900         if (!argptr)
4901             return -TARGET_EFAULT;
4902         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4903         unlock_user(argptr, arg, target_size);
4904 
4905         if (target_ifc_buf != 0) {
4906             /* copy ifreq[] to target user */
4907             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4908             for (i = 0; i < nb_ifreq ; i++) {
4909                 thunk_convert(argptr + i * target_ifreq_size,
4910                               host_ifc_buf + i * sizeof(struct ifreq),
4911                               ifreq_arg_type, THUNK_TARGET);
4912             }
4913             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4914         }
4915     }
4916 
4917     if (free_buf) {
4918         free(host_ifconf);
4919     }
4920 
4921     return ret;
4922 }
4923 
4924 #if defined(CONFIG_USBFS)
4925 #if HOST_LONG_BITS > 64
4926 #error USBDEVFS thunks do not support >64 bit hosts yet.
4927 #endif
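     /*
      * Book-keeping for an asynchronous URB submitted via USBDEVFS_SUBMITURB:
      * we record the guest URB address, the guest buffer address, the locked
      * host pointer for that buffer, and keep the host copy of the URB that
      * is actually handed to the kernel.  Because target_urb_adr is the
      * first field, a struct live_urb pointer also serves as the 64-bit key
      * used by the URB hash table below.
      */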
4928 struct live_urb {
4929     uint64_t target_urb_adr;
4930     uint64_t target_buf_adr;
4931     char *target_buf_ptr;
4932     struct usbdevfs_urb host_urb;
4933 };
4934 
4935 static GHashTable *usbdevfs_urb_hashtable(void)
4936 {
4937     static GHashTable *urb_hashtable;
4938 
4939     if (!urb_hashtable) {
4940         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4941     }
4942     return urb_hashtable;
4943 }
4944 
4945 static void urb_hashtable_insert(struct live_urb *urb)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     g_hash_table_insert(urb_hashtable, urb, urb);
4949 }
4950 
4951 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4952 {
4953     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4954     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4955 }
4956 
4957 static void urb_hashtable_remove(struct live_urb *urb)
4958 {
4959     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4960     g_hash_table_remove(urb_hashtable, urb);
4961 }
4962 
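     /*
      * Reap a completed URB: the host ioctl returns a pointer to our host
      * URB copy, from which the owning struct live_urb is recovered via
      * offsetof().  The results are copied back into the guest URB and the
      * guest URB address is written to the guest's pointer argument.
      */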
4963 static abi_long
4964 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4965                           int fd, int cmd, abi_long arg)
4966 {
4967     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4968     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4969     struct live_urb *lurb;
4970     void *argptr;
4971     uint64_t hurb;
4972     int target_size;
4973     uintptr_t target_urb_adr;
4974     abi_long ret;
4975 
4976     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4977 
4978     memset(buf_temp, 0, sizeof(uint64_t));
4979     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4980     if (is_error(ret)) {
4981         return ret;
4982     }
4983 
4984     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4985     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4986     if (!lurb->target_urb_adr) {
4987         return -TARGET_EFAULT;
4988     }
4989     urb_hashtable_remove(lurb);
4990     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4991         lurb->host_urb.buffer_length);
4992     lurb->target_buf_ptr = NULL;
4993 
4994     /* restore the guest buffer pointer */
4995     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4996 
4997     /* update the guest urb struct */
4998     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4999     if (!argptr) {
5000         g_free(lurb);
5001         return -TARGET_EFAULT;
5002     }
5003     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5004     unlock_user(argptr, lurb->target_urb_adr, target_size);
5005 
5006     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5007     /* write back the urb handle */
5008     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5009     if (!argptr) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013 
5014     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5015     target_urb_adr = lurb->target_urb_adr;
5016     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5017     unlock_user(argptr, arg, target_size);
5018 
5019     g_free(lurb);
5020     return ret;
5021 }
5022 
5023 static abi_long
5024 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5025                              uint8_t *buf_temp __attribute__((unused)),
5026                              int fd, int cmd, abi_long arg)
5027 {
5028     struct live_urb *lurb;
5029 
5030     /* map target address back to host URB with metadata. */
5031     lurb = urb_hashtable_lookup(arg);
5032     if (!lurb) {
5033         return -TARGET_EFAULT;
5034     }
5035     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5036 }
5037 
5038 static abi_long
5039 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5040                             int fd, int cmd, abi_long arg)
5041 {
5042     const argtype *arg_type = ie->arg_type;
5043     int target_size;
5044     abi_long ret;
5045     void *argptr;
5046     int rw_dir;
5047     struct live_urb *lurb;
5048 
5049     /*
5050      * Each submitted URB needs to map to a unique ID for the
5051      * kernel, and that unique ID needs to be a pointer to
5052      * host memory.  Hence, we need to malloc for each URB.
5053      * Isochronous transfers have a variable-length struct.
5054      */
5055     arg_type++;
5056     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5057 
5058     /* construct host copy of urb and metadata */
5059     lurb = g_try_malloc0(sizeof(struct live_urb));
5060     if (!lurb) {
5061         return -TARGET_ENOMEM;
5062     }
5063 
5064     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5065     if (!argptr) {
5066         g_free(lurb);
5067         return -TARGET_EFAULT;
5068     }
5069     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5070     unlock_user(argptr, arg, 0);
5071 
5072     lurb->target_urb_adr = arg;
5073     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5074 
5075     /* buffer space used depends on endpoint type so lock the entire buffer */
5076     /* control type urbs should check the buffer contents for true direction */
5077     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5078     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5079         lurb->host_urb.buffer_length, 1);
5080     if (lurb->target_buf_ptr == NULL) {
5081         g_free(lurb);
5082         return -TARGET_EFAULT;
5083     }
5084 
5085     /* update buffer pointer in host copy */
5086     lurb->host_urb.buffer = lurb->target_buf_ptr;
5087 
5088     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5089     if (is_error(ret)) {
5090         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5091         g_free(lurb);
5092     } else {
5093         urb_hashtable_insert(lurb);
5094     }
5095 
5096     return ret;
5097 }
5098 #endif /* CONFIG_USBFS */
5099 
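     /*
      * Device-mapper ioctls carry a variable-sized payload after the fixed
      * struct dm_ioctl header (described by data_start/data_size), so the
      * fixed-size buf_temp is not enough: a larger temporary buffer is
      * allocated and the payload is converted per command in both
      * directions.
      */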
5100 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5101                             int cmd, abi_long arg)
5102 {
5103     void *argptr;
5104     struct dm_ioctl *host_dm;
5105     abi_long guest_data;
5106     uint32_t guest_data_size;
5107     int target_size;
5108     const argtype *arg_type = ie->arg_type;
5109     abi_long ret;
5110     void *big_buf = NULL;
5111     char *host_data;
5112 
5113     arg_type++;
5114     target_size = thunk_type_size(arg_type, 0);
5115     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5116     if (!argptr) {
5117         ret = -TARGET_EFAULT;
5118         goto out;
5119     }
5120     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5121     unlock_user(argptr, arg, 0);
5122 
5123     /* buf_temp is too small, so fetch things into a bigger buffer */
5124     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5125     memcpy(big_buf, buf_temp, target_size);
5126     buf_temp = big_buf;
5127     host_dm = big_buf;
5128 
5129     guest_data = arg + host_dm->data_start;
5130     if ((guest_data - arg) < 0) {
5131         ret = -TARGET_EINVAL;
5132         goto out;
5133     }
5134     guest_data_size = host_dm->data_size - host_dm->data_start;
5135     host_data = (char*)host_dm + host_dm->data_start;
5136 
5137     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5138     if (!argptr) {
5139         ret = -TARGET_EFAULT;
5140         goto out;
5141     }
5142 
5143     switch (ie->host_cmd) {
5144     case DM_REMOVE_ALL:
5145     case DM_LIST_DEVICES:
5146     case DM_DEV_CREATE:
5147     case DM_DEV_REMOVE:
5148     case DM_DEV_SUSPEND:
5149     case DM_DEV_STATUS:
5150     case DM_DEV_WAIT:
5151     case DM_TABLE_STATUS:
5152     case DM_TABLE_CLEAR:
5153     case DM_TABLE_DEPS:
5154     case DM_LIST_VERSIONS:
5155         /* no input data */
5156         break;
5157     case DM_DEV_RENAME:
5158     case DM_DEV_SET_GEOMETRY:
5159         /* data contains only strings */
5160         memcpy(host_data, argptr, guest_data_size);
5161         break;
5162     case DM_TARGET_MSG:
5163         memcpy(host_data, argptr, guest_data_size);
5164         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5165         break;
5166     case DM_TABLE_LOAD:
5167     {
5168         void *gspec = argptr;
5169         void *cur_data = host_data;
5170         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5171         int spec_size = thunk_type_size(arg_type, 0);
5172         int i;
5173 
5174         for (i = 0; i < host_dm->target_count; i++) {
5175             struct dm_target_spec *spec = cur_data;
5176             uint32_t next;
5177             int slen;
5178 
5179             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5180             slen = strlen((char*)gspec + spec_size) + 1;
5181             next = spec->next;
5182             spec->next = sizeof(*spec) + slen;
5183             strcpy((char*)&spec[1], gspec + spec_size);
5184             gspec += next;
5185             cur_data += spec->next;
5186         }
5187         break;
5188     }
5189     default:
5190         ret = -TARGET_EINVAL;
5191         unlock_user(argptr, guest_data, 0);
5192         goto out;
5193     }
5194     unlock_user(argptr, guest_data, 0);
5195 
5196     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5197     if (!is_error(ret)) {
5198         guest_data = arg + host_dm->data_start;
5199         guest_data_size = host_dm->data_size - host_dm->data_start;
5200         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5201         switch (ie->host_cmd) {
5202         case DM_REMOVE_ALL:
5203         case DM_DEV_CREATE:
5204         case DM_DEV_REMOVE:
5205         case DM_DEV_RENAME:
5206         case DM_DEV_SUSPEND:
5207         case DM_DEV_STATUS:
5208         case DM_TABLE_LOAD:
5209         case DM_TABLE_CLEAR:
5210         case DM_TARGET_MSG:
5211         case DM_DEV_SET_GEOMETRY:
5212             /* no return data */
5213             break;
5214         case DM_LIST_DEVICES:
5215         {
5216             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5217             uint32_t remaining_data = guest_data_size;
5218             void *cur_data = argptr;
5219             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5220             int nl_size = 12; /* can't use thunk_type_size due to alignment */
5221 
5222             while (1) {
5223                 uint32_t next = nl->next;
5224                 if (next) {
5225                     nl->next = nl_size + (strlen(nl->name) + 1);
5226                 }
5227                 if (remaining_data < nl->next) {
5228                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5229                     break;
5230                 }
5231                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5232                 strcpy(cur_data + nl_size, nl->name);
5233                 cur_data += nl->next;
5234                 remaining_data -= nl->next;
5235                 if (!next) {
5236                     break;
5237                 }
5238                 nl = (void*)nl + next;
5239             }
5240             break;
5241         }
5242         case DM_DEV_WAIT:
5243         case DM_TABLE_STATUS:
5244         {
5245             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5246             void *cur_data = argptr;
5247             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5248             int spec_size = thunk_type_size(arg_type, 0);
5249             int i;
5250 
5251             for (i = 0; i < host_dm->target_count; i++) {
5252                 uint32_t next = spec->next;
5253                 int slen = strlen((char*)&spec[1]) + 1;
5254                 spec->next = (cur_data - argptr) + spec_size + slen;
5255                 if (guest_data_size < spec->next) {
5256                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5257                     break;
5258                 }
5259                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5260                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5261                 cur_data = argptr + spec->next;
5262                 spec = (void*)host_dm + host_dm->data_start + next;
5263             }
5264             break;
5265         }
5266         case DM_TABLE_DEPS:
5267         {
5268             void *hdata = (void*)host_dm + host_dm->data_start;
5269             int count = *(uint32_t*)hdata;
5270             uint64_t *hdev = hdata + 8;
5271             uint64_t *gdev = argptr + 8;
5272             int i;
5273 
5274             *(uint32_t*)argptr = tswap32(count);
5275             for (i = 0; i < count; i++) {
5276                 *gdev = tswap64(*hdev);
5277                 gdev++;
5278                 hdev++;
5279             }
5280             break;
5281         }
5282         case DM_LIST_VERSIONS:
5283         {
5284             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5285             uint32_t remaining_data = guest_data_size;
5286             void *cur_data = argptr;
5287             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5288             int vers_size = thunk_type_size(arg_type, 0);
5289 
5290             while (1) {
5291                 uint32_t next = vers->next;
5292                 if (next) {
5293                     vers->next = vers_size + (strlen(vers->name) + 1);
5294                 }
5295                 if (remaining_data < vers->next) {
5296                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5297                     break;
5298                 }
5299                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5300                 strcpy(cur_data + vers_size, vers->name);
5301                 cur_data += vers->next;
5302                 remaining_data -= vers->next;
5303                 if (!next) {
5304                     break;
5305                 }
5306                 vers = (void*)vers + next;
5307             }
5308             break;
5309         }
5310         default:
5311             unlock_user(argptr, guest_data, 0);
5312             ret = -TARGET_EINVAL;
5313             goto out;
5314         }
5315         unlock_user(argptr, guest_data, guest_data_size);
5316 
5317         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5318         if (!argptr) {
5319             ret = -TARGET_EFAULT;
5320             goto out;
5321         }
5322         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5323         unlock_user(argptr, arg, target_size);
5324     }
5325 out:
5326     g_free(big_buf);
5327     return ret;
5328 }
5329 
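     /*
      * BLKPG: struct blkpg_ioctl_arg contains a pointer to a
      * struct blkpg_partition, so both levels must be converted; only
      * BLKPG_ADD_PARTITION and BLKPG_DEL_PARTITION are handled here.
      */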
5330 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5331                                int cmd, abi_long arg)
5332 {
5333     void *argptr;
5334     int target_size;
5335     const argtype *arg_type = ie->arg_type;
5336     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5337     abi_long ret;
5338 
5339     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5340     struct blkpg_partition host_part;
5341 
5342     /* Read and convert blkpg */
5343     arg_type++;
5344     target_size = thunk_type_size(arg_type, 0);
5345     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5346     if (!argptr) {
5347         ret = -TARGET_EFAULT;
5348         goto out;
5349     }
5350     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5351     unlock_user(argptr, arg, 0);
5352 
5353     switch (host_blkpg->op) {
5354     case BLKPG_ADD_PARTITION:
5355     case BLKPG_DEL_PARTITION:
5356         /* payload is struct blkpg_partition */
5357         break;
5358     default:
5359         /* Unknown opcode */
5360         ret = -TARGET_EINVAL;
5361         goto out;
5362     }
5363 
5364     /* Read and convert blkpg->data */
5365     arg = (abi_long)(uintptr_t)host_blkpg->data;
5366     target_size = thunk_type_size(part_arg_type, 0);
5367     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5368     if (!argptr) {
5369         ret = -TARGET_EFAULT;
5370         goto out;
5371     }
5372     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5373     unlock_user(argptr, arg, 0);
5374 
5375     /* Swizzle the data pointer to our local copy and call! */
5376     host_blkpg->data = &host_part;
5377     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5378 
5379 out:
5380     return ret;
5381 }
5382 
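     /*
      * Routing table ioctls (e.g. SIOCADDRT/SIOCDELRT): struct rtentry
      * embeds the rt_dev string pointer, which the generic thunk code cannot
      * convert, so the struct is converted field by field and the device
      * name is locked in guest memory for the duration of the host ioctl.
      */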
5383 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                 int fd, int cmd, abi_long arg)
5385 {
5386     const argtype *arg_type = ie->arg_type;
5387     const StructEntry *se;
5388     const argtype *field_types;
5389     const int *dst_offsets, *src_offsets;
5390     int target_size;
5391     void *argptr;
5392     abi_ulong *target_rt_dev_ptr = NULL;
5393     unsigned long *host_rt_dev_ptr = NULL;
5394     abi_long ret;
5395     int i;
5396 
5397     assert(ie->access == IOC_W);
5398     assert(*arg_type == TYPE_PTR);
5399     arg_type++;
5400     assert(*arg_type == TYPE_STRUCT);
5401     target_size = thunk_type_size(arg_type, 0);
5402     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5403     if (!argptr) {
5404         return -TARGET_EFAULT;
5405     }
5406     arg_type++;
5407     assert(*arg_type == (int)STRUCT_rtentry);
5408     se = struct_entries + *arg_type++;
5409     assert(se->convert[0] == NULL);
5410     /* convert the struct by hand here so we can special-case the rt_dev string */
5411     field_types = se->field_types;
5412     dst_offsets = se->field_offsets[THUNK_HOST];
5413     src_offsets = se->field_offsets[THUNK_TARGET];
5414     for (i = 0; i < se->nb_fields; i++) {
5415         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5416             assert(*field_types == TYPE_PTRVOID);
5417             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5418             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5419             if (*target_rt_dev_ptr != 0) {
5420                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5421                                                   tswapal(*target_rt_dev_ptr));
5422                 if (!*host_rt_dev_ptr) {
5423                     unlock_user(argptr, arg, 0);
5424                     return -TARGET_EFAULT;
5425                 }
5426             } else {
5427                 *host_rt_dev_ptr = 0;
5428             }
5429             field_types++;
5430             continue;
5431         }
5432         field_types = thunk_convert(buf_temp + dst_offsets[i],
5433                                     argptr + src_offsets[i],
5434                                     field_types, THUNK_HOST);
5435     }
5436     unlock_user(argptr, arg, 0);
5437 
5438     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5439 
5440     assert(host_rt_dev_ptr != NULL);
5441     assert(target_rt_dev_ptr != NULL);
5442     if (*host_rt_dev_ptr != 0) {
5443         unlock_user((void *)*host_rt_dev_ptr,
5444                     *target_rt_dev_ptr, 0);
5445     }
5446     return ret;
5447 }
5448 
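     /*
      * KDSIGACCEPT takes a signal number as its argument, which must be
      * remapped from target to host numbering.
      */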
5449 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                      int fd, int cmd, abi_long arg)
5451 {
5452     int sig = target_to_host_signal(arg);
5453     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5454 }
5455 
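     /*
      * SIOCGSTAMP: fetch the host timestamp, then copy it to the guest in
      * either the old timeval layout or the 64-bit layout, depending on
      * which target command was used.
      */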
5456 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5457                                     int fd, int cmd, abi_long arg)
5458 {
5459     struct timeval tv;
5460     abi_long ret;
5461 
5462     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5463     if (is_error(ret)) {
5464         return ret;
5465     }
5466 
5467     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5468         if (copy_to_user_timeval(arg, &tv)) {
5469             return -TARGET_EFAULT;
5470         }
5471     } else {
5472         if (copy_to_user_timeval64(arg, &tv)) {
5473             return -TARGET_EFAULT;
5474         }
5475     }
5476 
5477     return ret;
5478 }
5479 
5480 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5481                                       int fd, int cmd, abi_long arg)
5482 {
5483     struct timespec ts;
5484     abi_long ret;
5485 
5486     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5487     if (is_error(ret)) {
5488         return ret;
5489     }
5490 
5491     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5492         if (host_to_target_timespec(arg, &ts)) {
5493             return -TARGET_EFAULT;
5494         }
5495     } else {
5496         if (host_to_target_timespec64(arg, &ts)) {
5497             return -TARGET_EFAULT;
5498         }
5499     }
5500 
5501     return ret;
5502 }
5503 
5504 #ifdef TIOCGPTPEER
5505 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5510 }
5511 #endif
5512 
5513 #ifdef HAVE_DRM_H
5514 
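     /*
      * Helpers for DRM_IOCTL_VERSION: the guest supplies buffers for the
      * name, date and description strings, which are locked for writing
      * while the host ioctl fills them in and are unlocked (with or without
      * copy-back) afterwards.
      */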
5515 static void unlock_drm_version(struct drm_version *host_ver,
5516                                struct target_drm_version *target_ver,
5517                                bool copy)
5518 {
5519     unlock_user(host_ver->name, target_ver->name,
5520                                 copy ? host_ver->name_len : 0);
5521     unlock_user(host_ver->date, target_ver->date,
5522                                 copy ? host_ver->date_len : 0);
5523     unlock_user(host_ver->desc, target_ver->desc,
5524                                 copy ? host_ver->desc_len : 0);
5525 }
5526 
5527 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5528                                           struct target_drm_version *target_ver)
5529 {
5530     memset(host_ver, 0, sizeof(*host_ver));
5531 
5532     __get_user(host_ver->name_len, &target_ver->name_len);
5533     if (host_ver->name_len) {
5534         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5535                                    target_ver->name_len, 0);
5536         if (!host_ver->name) {
5537             return -EFAULT;
5538         }
5539     }
5540 
5541     __get_user(host_ver->date_len, &target_ver->date_len);
5542     if (host_ver->date_len) {
5543         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5544                                    target_ver->date_len, 0);
5545         if (!host_ver->date) {
5546             goto err;
5547         }
5548     }
5549 
5550     __get_user(host_ver->desc_len, &target_ver->desc_len);
5551     if (host_ver->desc_len) {
5552         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5553                                    target_ver->desc_len, 0);
5554         if (!host_ver->desc) {
5555             goto err;
5556         }
5557     }
5558 
5559     return 0;
5560 err:
5561     unlock_drm_version(host_ver, target_ver, false);
5562     return -EFAULT;
5563 }
5564 
5565 static inline void host_to_target_drmversion(
5566                                           struct target_drm_version *target_ver,
5567                                           struct drm_version *host_ver)
5568 {
5569     __put_user(host_ver->version_major, &target_ver->version_major);
5570     __put_user(host_ver->version_minor, &target_ver->version_minor);
5571     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5572     __put_user(host_ver->name_len, &target_ver->name_len);
5573     __put_user(host_ver->date_len, &target_ver->date_len);
5574     __put_user(host_ver->desc_len, &target_ver->desc_len);
5575     unlock_drm_version(host_ver, target_ver, true);
5576 }
5577 
5578 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5579                              int fd, int cmd, abi_long arg)
5580 {
5581     struct drm_version *ver;
5582     struct target_drm_version *target_ver;
5583     abi_long ret;
5584 
5585     switch (ie->host_cmd) {
5586     case DRM_IOCTL_VERSION:
5587         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5588             return -TARGET_EFAULT;
5589         }
5590         ver = (struct drm_version *)buf_temp;
5591         ret = target_to_host_drmversion(ver, target_ver);
5592         if (!is_error(ret)) {
5593             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5594             if (is_error(ret)) {
5595                 unlock_drm_version(ver, target_ver, false);
5596             } else {
5597                 host_to_target_drmversion(target_ver, ver);
5598             }
5599         }
5600         unlock_user_struct(target_ver, arg, 0);
5601         return ret;
5602     }
5603     return -TARGET_ENOSYS;
5604 }
5605 
5606 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5607                                            struct drm_i915_getparam *gparam,
5608                                            int fd, abi_long arg)
5609 {
5610     abi_long ret;
5611     int value;
5612     struct target_drm_i915_getparam *target_gparam;
5613 
5614     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5615         return -TARGET_EFAULT;
5616     }
5617 
5618     __get_user(gparam->param, &target_gparam->param);
5619     gparam->value = &value;
5620     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5621     put_user_s32(value, target_gparam->value);
5622 
5623     unlock_user_struct(target_gparam, arg, 0);
5624     return ret;
5625 }
5626 
5627 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5628                                   int fd, int cmd, abi_long arg)
5629 {
5630     switch (ie->host_cmd) {
5631     case DRM_IOCTL_I915_GETPARAM:
5632         return do_ioctl_drm_i915_getparam(ie,
5633                                           (struct drm_i915_getparam *)buf_temp,
5634                                           fd, arg);
5635     default:
5636         return -TARGET_ENOSYS;
5637     }
5638 }
5639 
5640 #endif
5641 
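     /*
      * TUNSETTXFILTER: struct tun_filter is variable length, with
      * filter->count MAC addresses of ETH_ALEN bytes following the header,
      * so it is copied in two steps and bounds-checked against
      * MAX_STRUCT_SIZE.
      */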
5642 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5643                                         int fd, int cmd, abi_long arg)
5644 {
5645     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5646     struct tun_filter *target_filter;
5647     char *target_addr;
5648 
5649     assert(ie->access == IOC_W);
5650 
5651     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5652     if (!target_filter) {
5653         return -TARGET_EFAULT;
5654     }
5655     filter->flags = tswap16(target_filter->flags);
5656     filter->count = tswap16(target_filter->count);
5657     unlock_user(target_filter, arg, 0);
5658 
5659     if (filter->count) {
5660         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5661             MAX_STRUCT_SIZE) {
5662             return -TARGET_EFAULT;
5663         }
5664 
5665         target_addr = lock_user(VERIFY_READ,
5666                                 arg + offsetof(struct tun_filter, addr),
5667                                 filter->count * ETH_ALEN, 1);
5668         if (!target_addr) {
5669             return -TARGET_EFAULT;
5670         }
5671         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5672         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5673     }
5674 
5675     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5676 }
5677 
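     /*
      * The ioctl table is generated from ioctls.h: each entry maps a target
      * ioctl number to the host number and records the access mode and
      * argument type description; entries may also name a custom
      * do_ioctl_*() handler for requests the generic thunking cannot express.
      */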
5678 IOCTLEntry ioctl_entries[] = {
5679 #define IOCTL(cmd, access, ...) \
5680     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5681 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5682     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5683 #define IOCTL_IGNORE(cmd) \
5684     { TARGET_ ## cmd, 0, #cmd },
5685 #include "ioctls.h"
5686     { 0, 0, },
5687 };
5688 
5689 /* ??? Implement proper locking for ioctls.  */
5690 /* do_ioctl() must return target values and target errnos. */
5691 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5692 {
5693     const IOCTLEntry *ie;
5694     const argtype *arg_type;
5695     abi_long ret;
5696     uint8_t buf_temp[MAX_STRUCT_SIZE];
5697     int target_size;
5698     void *argptr;
5699 
5700     ie = ioctl_entries;
5701     for(;;) {
5702         if (ie->target_cmd == 0) {
5703             qemu_log_mask(
5704                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5705             return -TARGET_ENOSYS;
5706         }
5707         if (ie->target_cmd == cmd)
5708             break;
5709         ie++;
5710     }
5711     arg_type = ie->arg_type;
5712     if (ie->do_ioctl) {
5713         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5714     } else if (!ie->host_cmd) {
5715         /* Some architectures define BSD ioctls in their headers
5716            that are not implemented in Linux.  */
5717         return -TARGET_ENOSYS;
5718     }
5719 
5720     switch(arg_type[0]) {
5721     case TYPE_NULL:
5722         /* no argument */
5723         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5724         break;
5725     case TYPE_PTRVOID:
5726     case TYPE_INT:
5727     case TYPE_LONG:
5728     case TYPE_ULONG:
5729         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5730         break;
5731     case TYPE_PTR:
5732         arg_type++;
5733         target_size = thunk_type_size(arg_type, 0);
5734         switch(ie->access) {
5735         case IOC_R:
5736             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5737             if (!is_error(ret)) {
5738                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5739                 if (!argptr)
5740                     return -TARGET_EFAULT;
5741                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5742                 unlock_user(argptr, arg, target_size);
5743             }
5744             break;
5745         case IOC_W:
5746             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5747             if (!argptr)
5748                 return -TARGET_EFAULT;
5749             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5750             unlock_user(argptr, arg, 0);
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             break;
5753         default:
5754         case IOC_RW:
5755             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5756             if (!argptr)
5757                 return -TARGET_EFAULT;
5758             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5759             unlock_user(argptr, arg, 0);
5760             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5761             if (!is_error(ret)) {
5762                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5763                 if (!argptr)
5764                     return -TARGET_EFAULT;
5765                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5766                 unlock_user(argptr, arg, target_size);
5767             }
5768             break;
5769         }
5770         break;
5771     default:
5772         qemu_log_mask(LOG_UNIMP,
5773                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5774                       (long)cmd, arg_type[0]);
5775         ret = -TARGET_ENOSYS;
5776         break;
5777     }
5778     return ret;
5779 }
5780 
5781 static const bitmask_transtbl iflag_tbl[] = {
5782         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5783         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5784         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5785         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5786         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5787         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5788         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5789         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5790         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5791         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5792         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5793         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5794         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5795         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5796         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5797         { 0, 0, 0, 0 }
5798 };
5799 
5800 static const bitmask_transtbl oflag_tbl[] = {
5801 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5802 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5803 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5804 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5805 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5806 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5807 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5808 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5809 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5810 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5811 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5812 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5813 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5814 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5815 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5816 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5817 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5818 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5819 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5820 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5821 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5822 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5823 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5824 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5825 	{ 0, 0, 0, 0 }
5826 };
5827 
5828 static const bitmask_transtbl cflag_tbl[] = {
5829 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5830 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5831 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5832 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5833 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5834 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5835 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5836 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5837 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5838 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5839 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5840 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5841 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5842 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5843 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5844 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5845 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5846 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5847 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5848 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5849 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5850 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5851 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5852 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5853 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5854 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5855 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5856 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5857 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5858 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5859 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5860 	{ 0, 0, 0, 0 }
5861 };
5862 
5863 static const bitmask_transtbl lflag_tbl[] = {
5864   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5865   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5866   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5867   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5868   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5869   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5870   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5871   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5872   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5873   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5874   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5875   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5876   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5877   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5878   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5879   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5880   { 0, 0, 0, 0 }
5881 };
5882 
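     /*
      * Convert a target struct termios to the host layout: the flag words
      * are translated through the bitmask tables above and the control
      * characters are copied index by index.
      */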
5883 static void target_to_host_termios (void *dst, const void *src)
5884 {
5885     struct host_termios *host = dst;
5886     const struct target_termios *target = src;
5887 
5888     host->c_iflag =
5889         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5890     host->c_oflag =
5891         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5892     host->c_cflag =
5893         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5894     host->c_lflag =
5895         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5896     host->c_line = target->c_line;
5897 
5898     memset(host->c_cc, 0, sizeof(host->c_cc));
5899     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5900     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5901     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5902     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5903     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5904     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5905     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5906     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5907     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5908     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5909     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5910     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5911     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5912     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5913     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5914     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5915     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5916 }
5917 
5918 static void host_to_target_termios (void *dst, const void *src)
5919 {
5920     struct target_termios *target = dst;
5921     const struct host_termios *host = src;
5922 
5923     target->c_iflag =
5924         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5925     target->c_oflag =
5926         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5927     target->c_cflag =
5928         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5929     target->c_lflag =
5930         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5931     target->c_line = host->c_line;
5932 
5933     memset(target->c_cc, 0, sizeof(target->c_cc));
5934     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5935     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5936     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5937     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5938     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5939     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5940     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5941     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5942     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5943     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5944     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5945     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5946     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5947     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5948     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5949     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5950     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5951 }
5952 
5953 static const StructEntry struct_termios_def = {
5954     .convert = { host_to_target_termios, target_to_host_termios },
5955     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5956     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5957     .print = print_termios,
5958 };
5959 
5960 static const bitmask_transtbl mmap_flags_tbl[] = {
5961     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5962     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5963     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5964     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5965       MAP_ANONYMOUS, MAP_ANONYMOUS },
5966     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5967       MAP_GROWSDOWN, MAP_GROWSDOWN },
5968     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5969       MAP_DENYWRITE, MAP_DENYWRITE },
5970     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5971       MAP_EXECUTABLE, MAP_EXECUTABLE },
5972     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5973     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5974       MAP_NORESERVE, MAP_NORESERVE },
5975     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5976     /* MAP_STACK had been ignored by the kernel for quite some time.
5977        Recognize it for the target insofar as we do not want to pass
5978        it through to the host.  */
5979     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5980     { 0, 0, 0, 0 }
5981 };
5982 
5983 /*
5984  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5985  *       TARGET_I386 is defined whenever TARGET_X86_64 is defined.
5986  */
5987 #if defined(TARGET_I386)
5988 
5989 /* NOTE: there is really only one LDT shared by all the threads */
5990 static uint8_t *ldt_table;
5991 
5992 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5993 {
5994     int size;
5995     void *p;
5996 
5997     if (!ldt_table)
5998         return 0;
5999     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6000     if (size > bytecount)
6001         size = bytecount;
6002     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6003     if (!p)
6004         return -TARGET_EFAULT;
6005     /* ??? Should this be byteswapped?  */
6006     memcpy(p, ldt_table, size);
6007     unlock_user(p, ptr, size);
6008     return size;
6009 }
6010 
6011 /* XXX: add locking support */
6012 static abi_long write_ldt(CPUX86State *env,
6013                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6014 {
6015     struct target_modify_ldt_ldt_s ldt_info;
6016     struct target_modify_ldt_ldt_s *target_ldt_info;
6017     int seg_32bit, contents, read_exec_only, limit_in_pages;
6018     int seg_not_present, useable, lm;
6019     uint32_t *lp, entry_1, entry_2;
6020 
6021     if (bytecount != sizeof(ldt_info))
6022         return -TARGET_EINVAL;
6023     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6024         return -TARGET_EFAULT;
6025     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6026     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6027     ldt_info.limit = tswap32(target_ldt_info->limit);
6028     ldt_info.flags = tswap32(target_ldt_info->flags);
6029     unlock_user_struct(target_ldt_info, ptr, 0);
6030 
6031     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6032         return -TARGET_EINVAL;
6033     seg_32bit = ldt_info.flags & 1;
6034     contents = (ldt_info.flags >> 1) & 3;
6035     read_exec_only = (ldt_info.flags >> 3) & 1;
6036     limit_in_pages = (ldt_info.flags >> 4) & 1;
6037     seg_not_present = (ldt_info.flags >> 5) & 1;
6038     useable = (ldt_info.flags >> 6) & 1;
6039 #ifdef TARGET_ABI32
6040     lm = 0;
6041 #else
6042     lm = (ldt_info.flags >> 7) & 1;
6043 #endif
6044     if (contents == 3) {
6045         if (oldmode)
6046             return -TARGET_EINVAL;
6047         if (seg_not_present == 0)
6048             return -TARGET_EINVAL;
6049     }
6050     /* allocate the LDT */
6051     if (!ldt_table) {
6052         env->ldt.base = target_mmap(0,
6053                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6054                                     PROT_READ|PROT_WRITE,
6055                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6056         if (env->ldt.base == -1)
6057             return -TARGET_ENOMEM;
6058         memset(g2h_untagged(env->ldt.base), 0,
6059                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6060         env->ldt.limit = 0xffff;
6061         ldt_table = g2h_untagged(env->ldt.base);
6062     }
6063 
6064     /* NOTE: same code as Linux kernel */
6065     /* Allow LDTs to be cleared by the user. */
6066     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6067         if (oldmode ||
6068             (contents == 0		&&
6069              read_exec_only == 1	&&
6070              seg_32bit == 0		&&
6071              limit_in_pages == 0	&&
6072              seg_not_present == 1	&&
6073              useable == 0 )) {
6074             entry_1 = 0;
6075             entry_2 = 0;
6076             goto install;
6077         }
6078     }
6079 
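         /*
          * Pack the descriptor: entry_1 holds base[15:0] and limit[15:0],
          * while entry_2 holds the remaining base/limit bits together with
          * the type and flag bits (0x7000 sets the S bit and DPL=3),
          * mirroring the layout the kernel uses for LDT entries.
          */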
6080     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6081         (ldt_info.limit & 0x0ffff);
6082     entry_2 = (ldt_info.base_addr & 0xff000000) |
6083         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6084         (ldt_info.limit & 0xf0000) |
6085         ((read_exec_only ^ 1) << 9) |
6086         (contents << 10) |
6087         ((seg_not_present ^ 1) << 15) |
6088         (seg_32bit << 22) |
6089         (limit_in_pages << 23) |
6090         (lm << 21) |
6091         0x7000;
6092     if (!oldmode)
6093         entry_2 |= (useable << 20);
6094 
6095     /* Install the new entry ...  */
6096 install:
6097     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6098     lp[0] = tswap32(entry_1);
6099     lp[1] = tswap32(entry_2);
6100     return 0;
6101 }
6102 
6103 /* specific and weird i386 syscalls */
6104 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6105                               unsigned long bytecount)
6106 {
6107     abi_long ret;
6108 
6109     switch (func) {
6110     case 0:
6111         ret = read_ldt(ptr, bytecount);
6112         break;
6113     case 1:
6114         ret = write_ldt(env, ptr, bytecount, 1);
6115         break;
6116     case 0x11:
6117         ret = write_ldt(env, ptr, bytecount, 0);
6118         break;
6119     default:
6120         ret = -TARGET_ENOSYS;
6121         break;
6122     }
6123     return ret;
6124 }
6125 
6126 #if defined(TARGET_ABI32)
6127 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6128 {
6129     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6130     struct target_modify_ldt_ldt_s ldt_info;
6131     struct target_modify_ldt_ldt_s *target_ldt_info;
6132     int seg_32bit, contents, read_exec_only, limit_in_pages;
6133     int seg_not_present, useable, lm;
6134     uint32_t *lp, entry_1, entry_2;
6135     int i;
6136 
6137     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6138     if (!target_ldt_info)
6139         return -TARGET_EFAULT;
6140     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6141     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6142     ldt_info.limit = tswap32(target_ldt_info->limit);
6143     ldt_info.flags = tswap32(target_ldt_info->flags);
6144     if (ldt_info.entry_number == -1) {
6145         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6146             if (gdt_table[i] == 0) {
6147                 ldt_info.entry_number = i;
6148                 target_ldt_info->entry_number = tswap32(i);
6149                 break;
6150             }
6151         }
6152     }
6153     unlock_user_struct(target_ldt_info, ptr, 1);
6154 
6155     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6156         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6157            return -TARGET_EINVAL;
6158     seg_32bit = ldt_info.flags & 1;
6159     contents = (ldt_info.flags >> 1) & 3;
6160     read_exec_only = (ldt_info.flags >> 3) & 1;
6161     limit_in_pages = (ldt_info.flags >> 4) & 1;
6162     seg_not_present = (ldt_info.flags >> 5) & 1;
6163     useable = (ldt_info.flags >> 6) & 1;
6164 #ifdef TARGET_ABI32
6165     lm = 0;
6166 #else
6167     lm = (ldt_info.flags >> 7) & 1;
6168 #endif
6169 
6170     if (contents == 3) {
6171         if (seg_not_present == 0)
6172             return -TARGET_EINVAL;
6173     }
6174 
6175     /* NOTE: same code as Linux kernel */
6176     /* Allow LDTs to be cleared by the user. */
6177     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6178         if ((contents == 0             &&
6179              read_exec_only == 1       &&
6180              seg_32bit == 0            &&
6181              limit_in_pages == 0       &&
6182              seg_not_present == 1      &&
6183              useable == 0 )) {
6184             entry_1 = 0;
6185             entry_2 = 0;
6186             goto install;
6187         }
6188     }
6189 
6190     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6191         (ldt_info.limit & 0x0ffff);
6192     entry_2 = (ldt_info.base_addr & 0xff000000) |
6193         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6194         (ldt_info.limit & 0xf0000) |
6195         ((read_exec_only ^ 1) << 9) |
6196         (contents << 10) |
6197         ((seg_not_present ^ 1) << 15) |
6198         (seg_32bit << 22) |
6199         (limit_in_pages << 23) |
6200         (useable << 20) |
6201         (lm << 21) |
6202         0x7000;
6203 
6204     /* Install the new entry ...  */
6205 install:
6206     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6207     lp[0] = tswap32(entry_1);
6208     lp[1] = tswap32(entry_2);
6209     return 0;
6210 }
6211 
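/* Read a TLS entry back out of the GDT and re-encode it in the guest's
   entry_number/base/limit/flags layout. */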
6212 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6213 {
6214     struct target_modify_ldt_ldt_s *target_ldt_info;
6215     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6216     uint32_t base_addr, limit, flags;
6217     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6218     int seg_not_present, useable, lm;
6219     uint32_t *lp, entry_1, entry_2;
6220 
6221     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6222     if (!target_ldt_info)
6223         return -TARGET_EFAULT;
6224     idx = tswap32(target_ldt_info->entry_number);
6225     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6226         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6227         unlock_user_struct(target_ldt_info, ptr, 1);
6228         return -TARGET_EINVAL;
6229     }
6230     lp = (uint32_t *)(gdt_table + idx);
6231     entry_1 = tswap32(lp[0]);
6232     entry_2 = tswap32(lp[1]);
6233 
6234     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6235     contents = (entry_2 >> 10) & 3;
6236     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6237     seg_32bit = (entry_2 >> 22) & 1;
6238     limit_in_pages = (entry_2 >> 23) & 1;
6239     useable = (entry_2 >> 20) & 1;
6240 #ifdef TARGET_ABI32
6241     lm = 0;
6242 #else
6243     lm = (entry_2 >> 21) & 1;
6244 #endif
6245     flags = (seg_32bit << 0) | (contents << 1) |
6246         (read_exec_only << 3) | (limit_in_pages << 4) |
6247         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6248     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6249     base_addr = (entry_1 >> 16) |
6250         (entry_2 & 0xff000000) |
6251         ((entry_2 & 0xff) << 16);
6252     target_ldt_info->base_addr = tswapal(base_addr);
6253     target_ldt_info->limit = tswap32(limit);
6254     target_ldt_info->flags = tswap32(flags);
6255     unlock_user_struct(target_ldt_info, ptr, 1);
6256     return 0;
6257 }
6258 
6259 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6260 {
6261     return -TARGET_ENOSYS;
6262 }
6263 #else
6264 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6265 {
6266     abi_long ret = 0;
6267     abi_ulong val;
6268     int idx;
6269 
6270     switch(code) {
6271     case TARGET_ARCH_SET_GS:
6272     case TARGET_ARCH_SET_FS:
6273         if (code == TARGET_ARCH_SET_GS)
6274             idx = R_GS;
6275         else
6276             idx = R_FS;
6277         cpu_x86_load_seg(env, idx, 0);
6278         env->segs[idx].base = addr;
6279         break;
6280     case TARGET_ARCH_GET_GS:
6281     case TARGET_ARCH_GET_FS:
6282         if (code == TARGET_ARCH_GET_GS)
6283             idx = R_GS;
6284         else
6285             idx = R_FS;
6286         val = env->segs[idx].base;
6287         if (put_user(val, addr, abi_ulong))
6288             ret = -TARGET_EFAULT;
6289         break;
6290     default:
6291         ret = -TARGET_EINVAL;
6292         break;
6293     }
6294     return ret;
6295 }
6296 #endif /* defined(TARGET_ABI32) */
6297 #endif /* defined(TARGET_I386) */
6298 
6299 /*
6300  * These constants are generic.  Supply any that are missing from the host.
6301  */
6302 #ifndef PR_SET_NAME
6303 # define PR_SET_NAME    15
6304 # define PR_GET_NAME    16
6305 #endif
6306 #ifndef PR_SET_FP_MODE
6307 # define PR_SET_FP_MODE 45
6308 # define PR_GET_FP_MODE 46
6309 # define PR_FP_MODE_FR   (1 << 0)
6310 # define PR_FP_MODE_FRE  (1 << 1)
6311 #endif
6312 #ifndef PR_SVE_SET_VL
6313 # define PR_SVE_SET_VL  50
6314 # define PR_SVE_GET_VL  51
6315 # define PR_SVE_VL_LEN_MASK  0xffff
6316 # define PR_SVE_VL_INHERIT   (1 << 17)
6317 #endif
6318 #ifndef PR_PAC_RESET_KEYS
6319 # define PR_PAC_RESET_KEYS  54
6320 # define PR_PAC_APIAKEY   (1 << 0)
6321 # define PR_PAC_APIBKEY   (1 << 1)
6322 # define PR_PAC_APDAKEY   (1 << 2)
6323 # define PR_PAC_APDBKEY   (1 << 3)
6324 # define PR_PAC_APGAKEY   (1 << 4)
6325 #endif
6326 #ifndef PR_SET_TAGGED_ADDR_CTRL
6327 # define PR_SET_TAGGED_ADDR_CTRL 55
6328 # define PR_GET_TAGGED_ADDR_CTRL 56
6329 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6330 #endif
6331 #ifndef PR_MTE_TCF_SHIFT
6332 # define PR_MTE_TCF_SHIFT       1
6333 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6337 # define PR_MTE_TAG_SHIFT       3
6338 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6339 #endif
6340 #ifndef PR_SET_IO_FLUSHER
6341 # define PR_SET_IO_FLUSHER 57
6342 # define PR_GET_IO_FLUSHER 58
6343 #endif
6344 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6345 # define PR_SET_SYSCALL_USER_DISPATCH 59
6346 #endif
6347 
6348 #include "target_prctl.h"
6349 
6350 static abi_long do_prctl_inval0(CPUArchState *env)
6351 {
6352     return -TARGET_EINVAL;
6353 }
6354 
6355 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6356 {
6357     return -TARGET_EINVAL;
6358 }
6359 
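/*
 * Targets provide their own do_prctl_* handlers via target_prctl.h
 * (included above); any handler a target does not define falls back to
 * the -TARGET_EINVAL stubs above.
 */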
6360 #ifndef do_prctl_get_fp_mode
6361 #define do_prctl_get_fp_mode do_prctl_inval0
6362 #endif
6363 #ifndef do_prctl_set_fp_mode
6364 #define do_prctl_set_fp_mode do_prctl_inval1
6365 #endif
6366 #ifndef do_prctl_get_vl
6367 #define do_prctl_get_vl do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_set_vl
6370 #define do_prctl_set_vl do_prctl_inval1
6371 #endif
6372 #ifndef do_prctl_reset_keys
6373 #define do_prctl_reset_keys do_prctl_inval1
6374 #endif
6375 #ifndef do_prctl_set_tagged_addr_ctrl
6376 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6377 #endif
6378 #ifndef do_prctl_get_tagged_addr_ctrl
6379 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6380 #endif
6381 #ifndef do_prctl_get_unalign
6382 #define do_prctl_get_unalign do_prctl_inval1
6383 #endif
6384 #ifndef do_prctl_set_unalign
6385 #define do_prctl_set_unalign do_prctl_inval1
6386 #endif
6387 
6388 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6389                          abi_long arg3, abi_long arg4, abi_long arg5)
6390 {
6391     abi_long ret;
6392 
6393     switch (option) {
6394     case PR_GET_PDEATHSIG:
6395         {
6396             int deathsig;
6397             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6398                                   arg3, arg4, arg5));
6399             if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
6400                 return -TARGET_EFAULT;
6401             }
6402             return ret;
6403         }
6404     case PR_GET_NAME:
6405         {
6406             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6407             if (!name) {
6408                 return -TARGET_EFAULT;
6409             }
6410             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6411                                   arg3, arg4, arg5));
6412             unlock_user(name, arg2, 16);
6413             return ret;
6414         }
6415     case PR_SET_NAME:
6416         {
6417             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6418             if (!name) {
6419                 return -TARGET_EFAULT;
6420             }
6421             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6422                                   arg3, arg4, arg5));
6423             unlock_user(name, arg2, 0);
6424             return ret;
6425         }
6426     case PR_GET_FP_MODE:
6427         return do_prctl_get_fp_mode(env);
6428     case PR_SET_FP_MODE:
6429         return do_prctl_set_fp_mode(env, arg2);
6430     case PR_SVE_GET_VL:
6431         return do_prctl_get_vl(env);
6432     case PR_SVE_SET_VL:
6433         return do_prctl_set_vl(env, arg2);
6434     case PR_PAC_RESET_KEYS:
6435         if (arg3 || arg4 || arg5) {
6436             return -TARGET_EINVAL;
6437         }
6438         return do_prctl_reset_keys(env, arg2);
6439     case PR_SET_TAGGED_ADDR_CTRL:
6440         if (arg3 || arg4 || arg5) {
6441             return -TARGET_EINVAL;
6442         }
6443         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6444     case PR_GET_TAGGED_ADDR_CTRL:
6445         if (arg2 || arg3 || arg4 || arg5) {
6446             return -TARGET_EINVAL;
6447         }
6448         return do_prctl_get_tagged_addr_ctrl(env);
6449 
6450     case PR_GET_UNALIGN:
6451         return do_prctl_get_unalign(env, arg2);
6452     case PR_SET_UNALIGN:
6453         return do_prctl_set_unalign(env, arg2);
6454 
6455     case PR_GET_DUMPABLE:
6456     case PR_SET_DUMPABLE:
6457     case PR_GET_KEEPCAPS:
6458     case PR_SET_KEEPCAPS:
6459     case PR_GET_TIMING:
6460     case PR_SET_TIMING:
6461     case PR_GET_TIMERSLACK:
6462     case PR_SET_TIMERSLACK:
6463     case PR_MCE_KILL:
6464     case PR_MCE_KILL_GET:
6465     case PR_GET_NO_NEW_PRIVS:
6466     case PR_SET_NO_NEW_PRIVS:
6467     case PR_GET_IO_FLUSHER:
6468     case PR_SET_IO_FLUSHER:
6469         /* These options take no pointer arguments, so we can pass them through. */
6470         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6471 
6472     case PR_GET_CHILD_SUBREAPER:
6473     case PR_SET_CHILD_SUBREAPER:
6474     case PR_GET_SPECULATION_CTRL:
6475     case PR_SET_SPECULATION_CTRL:
6476     case PR_GET_TID_ADDRESS:
6477         /* TODO */
6478         return -TARGET_EINVAL;
6479 
6480     case PR_GET_FPEXC:
6481     case PR_SET_FPEXC:
6482         /* Was used for SPE on PowerPC. */
6483         return -TARGET_EINVAL;
6484 
6485     case PR_GET_ENDIAN:
6486     case PR_SET_ENDIAN:
6487     case PR_GET_FPEMU:
6488     case PR_SET_FPEMU:
6489     case PR_SET_MM:
6490     case PR_GET_SECCOMP:
6491     case PR_SET_SECCOMP:
6492     case PR_SET_SYSCALL_USER_DISPATCH:
6493     case PR_GET_THP_DISABLE:
6494     case PR_SET_THP_DISABLE:
6495     case PR_GET_TSC:
6496     case PR_SET_TSC:
6497         /* Refuse these so the guest can't disable features QEMU relies on. */
6498         return -TARGET_EINVAL;
6499 
6500     default:
6501         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6502                       option);
6503         return -TARGET_EINVAL;
6504     }
6505 }
6506 
6507 #define NEW_STACK_SIZE 0x40000
6508 
6509 
6510 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
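/* Arguments handed from do_fork() to the new thread; the parent waits on
   the condition variable until the child has published its TID. */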
6511 typedef struct {
6512     CPUArchState *env;
6513     pthread_mutex_t mutex;
6514     pthread_cond_t cond;
6515     pthread_t thread;
6516     uint32_t tid;
6517     abi_ulong child_tidptr;
6518     abi_ulong parent_tidptr;
6519     sigset_t sigmask;
6520 } new_thread_info;
6521 
6522 static void *clone_func(void *arg)
6523 {
6524     new_thread_info *info = arg;
6525     CPUArchState *env;
6526     CPUState *cpu;
6527     TaskState *ts;
6528 
6529     rcu_register_thread();
6530     tcg_register_thread();
6531     env = info->env;
6532     cpu = env_cpu(env);
6533     thread_cpu = cpu;
6534     ts = (TaskState *)cpu->opaque;
6535     info->tid = sys_gettid();
6536     task_settid(ts);
6537     if (info->child_tidptr)
6538         put_user_u32(info->tid, info->child_tidptr);
6539     if (info->parent_tidptr)
6540         put_user_u32(info->tid, info->parent_tidptr);
6541     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6542     /* Enable signals.  */
6543     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6544     /* Signal to the parent that we're ready.  */
6545     pthread_mutex_lock(&info->mutex);
6546     pthread_cond_broadcast(&info->cond);
6547     pthread_mutex_unlock(&info->mutex);
6548     /* Wait until the parent has finished initializing the tls state.  */
6549     pthread_mutex_lock(&clone_lock);
6550     pthread_mutex_unlock(&clone_lock);
6551     cpu_loop(env);
6552     /* never exits */
6553     return NULL;
6554 }
6555 
6556 /* do_fork() must return host values and target errnos (unlike most
6557    do_*() functions). */
6558 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6559                    abi_ulong parent_tidptr, target_ulong newtls,
6560                    abi_ulong child_tidptr)
6561 {
6562     CPUState *cpu = env_cpu(env);
6563     int ret;
6564     TaskState *ts;
6565     CPUState *new_cpu;
6566     CPUArchState *new_env;
6567     sigset_t sigmask;
6568 
6569     flags &= ~CLONE_IGNORED_FLAGS;
6570 
6571     /* Emulate vfork() with fork() */
6572     if (flags & CLONE_VFORK)
6573         flags &= ~(CLONE_VFORK | CLONE_VM);
6574 
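    /* With CLONE_VM we create a new guest thread in this process;
       otherwise we fall through to an ordinary fork() below. */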
6575     if (flags & CLONE_VM) {
6576         TaskState *parent_ts = (TaskState *)cpu->opaque;
6577         new_thread_info info;
6578         pthread_attr_t attr;
6579 
6580         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6581             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6582             return -TARGET_EINVAL;
6583         }
6584 
6585         ts = g_new0(TaskState, 1);
6586         init_task_state(ts);
6587 
6588         /* Grab a mutex so that thread setup appears atomic.  */
6589         pthread_mutex_lock(&clone_lock);
6590 
6591         /*
6592          * If this is our first additional thread, we need to ensure we
6593          * generate code for parallel execution and flush old translations.
6594          * Do this now so that the copy gets CF_PARALLEL too.
6595          */
6596         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6597             cpu->tcg_cflags |= CF_PARALLEL;
6598             tb_flush(cpu);
6599         }
6600 
6601         /* we create a new CPU instance. */
6602         new_env = cpu_copy(env);
6603         /* Init regs that differ from the parent.  */
6604         cpu_clone_regs_child(new_env, newsp, flags);
6605         cpu_clone_regs_parent(env, flags);
6606         new_cpu = env_cpu(new_env);
6607         new_cpu->opaque = ts;
6608         ts->bprm = parent_ts->bprm;
6609         ts->info = parent_ts->info;
6610         ts->signal_mask = parent_ts->signal_mask;
6611 
6612         if (flags & CLONE_CHILD_CLEARTID) {
6613             ts->child_tidptr = child_tidptr;
6614         }
6615 
6616         if (flags & CLONE_SETTLS) {
6617             cpu_set_tls (new_env, newtls);
6618         }
6619 
6620         memset(&info, 0, sizeof(info));
6621         pthread_mutex_init(&info.mutex, NULL);
6622         pthread_mutex_lock(&info.mutex);
6623         pthread_cond_init(&info.cond, NULL);
6624         info.env = new_env;
6625         if (flags & CLONE_CHILD_SETTID) {
6626             info.child_tidptr = child_tidptr;
6627         }
6628         if (flags & CLONE_PARENT_SETTID) {
6629             info.parent_tidptr = parent_tidptr;
6630         }
6631 
6632         ret = pthread_attr_init(&attr);
6633         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6634         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6635         /* It is not safe to deliver signals until the child has finished
6636            initializing, so temporarily block all signals.  */
6637         sigfillset(&sigmask);
6638         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6639         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6640 
6641         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6642         /* TODO: Free new CPU state if thread creation failed.  */
6643 
6644         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6645         pthread_attr_destroy(&attr);
6646         if (ret == 0) {
6647             /* Wait for the child to initialize.  */
6648             pthread_cond_wait(&info.cond, &info.mutex);
6649             ret = info.tid;
6650         } else {
6651             ret = -1;
6652         }
6653         pthread_mutex_unlock(&info.mutex);
6654         pthread_cond_destroy(&info.cond);
6655         pthread_mutex_destroy(&info.mutex);
6656         pthread_mutex_unlock(&clone_lock);
6657     } else {
6658         /* without CLONE_VM, we treat it as a plain fork */
6659         if (flags & CLONE_INVALID_FORK_FLAGS) {
6660             return -TARGET_EINVAL;
6661         }
6662 
6663         /* We can't support custom termination signals */
6664         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6665             return -TARGET_EINVAL;
6666         }
6667 
6668         if (block_signals()) {
6669             return -QEMU_ERESTARTSYS;
6670         }
6671 
6672         fork_start();
6673         ret = fork();
6674         if (ret == 0) {
6675             /* Child Process.  */
6676             cpu_clone_regs_child(env, newsp, flags);
6677             fork_end(1);
6678             /* There is a race condition here.  The parent process could
6679                theoretically read the TID in the child process before the child
6680                tid is set.  This would require using either ptrace
6681                (not implemented) or having *_tidptr point at a shared memory
6682                mapping.  We can't repeat the spinlock hack used above because
6683                the child process gets its own copy of the lock.  */
6684             if (flags & CLONE_CHILD_SETTID)
6685                 put_user_u32(sys_gettid(), child_tidptr);
6686             if (flags & CLONE_PARENT_SETTID)
6687                 put_user_u32(sys_gettid(), parent_tidptr);
6688             ts = (TaskState *)cpu->opaque;
6689             if (flags & CLONE_SETTLS)
6690                 cpu_set_tls (env, newtls);
6691             if (flags & CLONE_CHILD_CLEARTID)
6692                 ts->child_tidptr = child_tidptr;
6693         } else {
6694             cpu_clone_regs_parent(env, flags);
6695             fork_end(0);
6696         }
6697     }
6698     return ret;
6699 }
6700 
6701 /* warning: doesn't handle Linux-specific flags... */
6702 static int target_to_host_fcntl_cmd(int cmd)
6703 {
6704     int ret;
6705 
6706     switch(cmd) {
6707     case TARGET_F_DUPFD:
6708     case TARGET_F_GETFD:
6709     case TARGET_F_SETFD:
6710     case TARGET_F_GETFL:
6711     case TARGET_F_SETFL:
6712     case TARGET_F_OFD_GETLK:
6713     case TARGET_F_OFD_SETLK:
6714     case TARGET_F_OFD_SETLKW:
6715         ret = cmd;
6716         break;
6717     case TARGET_F_GETLK:
6718         ret = F_GETLK64;
6719         break;
6720     case TARGET_F_SETLK:
6721         ret = F_SETLK64;
6722         break;
6723     case TARGET_F_SETLKW:
6724         ret = F_SETLKW64;
6725         break;
6726     case TARGET_F_GETOWN:
6727         ret = F_GETOWN;
6728         break;
6729     case TARGET_F_SETOWN:
6730         ret = F_SETOWN;
6731         break;
6732     case TARGET_F_GETSIG:
6733         ret = F_GETSIG;
6734         break;
6735     case TARGET_F_SETSIG:
6736         ret = F_SETSIG;
6737         break;
6738 #if TARGET_ABI_BITS == 32
6739     case TARGET_F_GETLK64:
6740         ret = F_GETLK64;
6741         break;
6742     case TARGET_F_SETLK64:
6743         ret = F_SETLK64;
6744         break;
6745     case TARGET_F_SETLKW64:
6746         ret = F_SETLKW64;
6747         break;
6748 #endif
6749     case TARGET_F_SETLEASE:
6750         ret = F_SETLEASE;
6751         break;
6752     case TARGET_F_GETLEASE:
6753         ret = F_GETLEASE;
6754         break;
6755 #ifdef F_DUPFD_CLOEXEC
6756     case TARGET_F_DUPFD_CLOEXEC:
6757         ret = F_DUPFD_CLOEXEC;
6758         break;
6759 #endif
6760     case TARGET_F_NOTIFY:
6761         ret = F_NOTIFY;
6762         break;
6763 #ifdef F_GETOWN_EX
6764     case TARGET_F_GETOWN_EX:
6765         ret = F_GETOWN_EX;
6766         break;
6767 #endif
6768 #ifdef F_SETOWN_EX
6769     case TARGET_F_SETOWN_EX:
6770         ret = F_SETOWN_EX;
6771         break;
6772 #endif
6773 #ifdef F_SETPIPE_SZ
6774     case TARGET_F_SETPIPE_SZ:
6775         ret = F_SETPIPE_SZ;
6776         break;
6777     case TARGET_F_GETPIPE_SZ:
6778         ret = F_GETPIPE_SZ;
6779         break;
6780 #endif
6781 #ifdef F_ADD_SEALS
6782     case TARGET_F_ADD_SEALS:
6783         ret = F_ADD_SEALS;
6784         break;
6785     case TARGET_F_GET_SEALS:
6786         ret = F_GET_SEALS;
6787         break;
6788 #endif
6789     default:
6790         ret = -TARGET_EINVAL;
6791         break;
6792     }
6793 
6794 #if defined(__powerpc64__)
6795     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6796      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6797      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6798      * the syscall directly, adjust to what the kernel supports.
6799      */
6800     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6801         ret -= F_GETLK64 - 5;
6802     }
6803 #endif
6804 
6805     return ret;
6806 }
6807 
6808 #define FLOCK_TRANSTBL \
6809     switch (type) { \
6810     TRANSTBL_CONVERT(F_RDLCK); \
6811     TRANSTBL_CONVERT(F_WRLCK); \
6812     TRANSTBL_CONVERT(F_UNLCK); \
6813     }
6814 
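/*
 * FLOCK_TRANSTBL expands to a switch over 'type'; with the TRANSTBL_CONVERT
 * definition used in target_to_host_flock() below, for example,
 * TRANSTBL_CONVERT(F_RDLCK) becomes "case TARGET_F_RDLCK: return F_RDLCK;".
 */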
6815 static int target_to_host_flock(int type)
6816 {
6817 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6818     FLOCK_TRANSTBL
6819 #undef  TRANSTBL_CONVERT
6820     return -TARGET_EINVAL;
6821 }
6822 
6823 static int host_to_target_flock(int type)
6824 {
6825 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6826     FLOCK_TRANSTBL
6827 #undef  TRANSTBL_CONVERT
6828     /* if we don't know how to convert the value coming
6829      * from the host, we copy it to the target field as-is
6830      */
6831     return type;
6832 }
6833 
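/* Helpers converting between the guest flock layouts and the host's
   struct flock64, one field at a time. */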
6834 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6835                                             abi_ulong target_flock_addr)
6836 {
6837     struct target_flock *target_fl;
6838     int l_type;
6839 
6840     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6841         return -TARGET_EFAULT;
6842     }
6843 
6844     __get_user(l_type, &target_fl->l_type);
6845     l_type = target_to_host_flock(l_type);
6846     if (l_type < 0) {
6847         return l_type;
6848     }
6849     fl->l_type = l_type;
6850     __get_user(fl->l_whence, &target_fl->l_whence);
6851     __get_user(fl->l_start, &target_fl->l_start);
6852     __get_user(fl->l_len, &target_fl->l_len);
6853     __get_user(fl->l_pid, &target_fl->l_pid);
6854     unlock_user_struct(target_fl, target_flock_addr, 0);
6855     return 0;
6856 }
6857 
6858 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6859                                           const struct flock64 *fl)
6860 {
6861     struct target_flock *target_fl;
6862     short l_type;
6863 
6864     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6865         return -TARGET_EFAULT;
6866     }
6867 
6868     l_type = host_to_target_flock(fl->l_type);
6869     __put_user(l_type, &target_fl->l_type);
6870     __put_user(fl->l_whence, &target_fl->l_whence);
6871     __put_user(fl->l_start, &target_fl->l_start);
6872     __put_user(fl->l_len, &target_fl->l_len);
6873     __put_user(fl->l_pid, &target_fl->l_pid);
6874     unlock_user_struct(target_fl, target_flock_addr, 1);
6875     return 0;
6876 }
6877 
6878 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6879 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6880 
6881 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6882 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6883                                                    abi_ulong target_flock_addr)
6884 {
6885     struct target_oabi_flock64 *target_fl;
6886     int l_type;
6887 
6888     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6889         return -TARGET_EFAULT;
6890     }
6891 
6892     __get_user(l_type, &target_fl->l_type);
6893     l_type = target_to_host_flock(l_type);
6894     if (l_type < 0) {
6895         return l_type;
6896     }
6897     fl->l_type = l_type;
6898     __get_user(fl->l_whence, &target_fl->l_whence);
6899     __get_user(fl->l_start, &target_fl->l_start);
6900     __get_user(fl->l_len, &target_fl->l_len);
6901     __get_user(fl->l_pid, &target_fl->l_pid);
6902     unlock_user_struct(target_fl, target_flock_addr, 0);
6903     return 0;
6904 }
6905 
6906 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6907                                                  const struct flock64 *fl)
6908 {
6909     struct target_oabi_flock64 *target_fl;
6910     short l_type;
6911 
6912     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6913         return -TARGET_EFAULT;
6914     }
6915 
6916     l_type = host_to_target_flock(fl->l_type);
6917     __put_user(l_type, &target_fl->l_type);
6918     __put_user(fl->l_whence, &target_fl->l_whence);
6919     __put_user(fl->l_start, &target_fl->l_start);
6920     __put_user(fl->l_len, &target_fl->l_len);
6921     __put_user(fl->l_pid, &target_fl->l_pid);
6922     unlock_user_struct(target_fl, target_flock_addr, 1);
6923     return 0;
6924 }
6925 #endif
6926 
6927 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6928                                               abi_ulong target_flock_addr)
6929 {
6930     struct target_flock64 *target_fl;
6931     int l_type;
6932 
6933     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6934         return -TARGET_EFAULT;
6935     }
6936 
6937     __get_user(l_type, &target_fl->l_type);
6938     l_type = target_to_host_flock(l_type);
6939     if (l_type < 0) {
6940         return l_type;
6941     }
6942     fl->l_type = l_type;
6943     __get_user(fl->l_whence, &target_fl->l_whence);
6944     __get_user(fl->l_start, &target_fl->l_start);
6945     __get_user(fl->l_len, &target_fl->l_len);
6946     __get_user(fl->l_pid, &target_fl->l_pid);
6947     unlock_user_struct(target_fl, target_flock_addr, 0);
6948     return 0;
6949 }
6950 
6951 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6952                                             const struct flock64 *fl)
6953 {
6954     struct target_flock64 *target_fl;
6955     short l_type;
6956 
6957     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6958         return -TARGET_EFAULT;
6959     }
6960 
6961     l_type = host_to_target_flock(fl->l_type);
6962     __put_user(l_type, &target_fl->l_type);
6963     __put_user(fl->l_whence, &target_fl->l_whence);
6964     __put_user(fl->l_start, &target_fl->l_start);
6965     __put_user(fl->l_len, &target_fl->l_len);
6966     __put_user(fl->l_pid, &target_fl->l_pid);
6967     unlock_user_struct(target_fl, target_flock_addr, 1);
6968     return 0;
6969 }
6970 
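/* Emulate fcntl(): translate the command, convert any pointed-to structures
   in both directions, and pass everything else through to the host. */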
6971 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6972 {
6973     struct flock64 fl64;
6974 #ifdef F_GETOWN_EX
6975     struct f_owner_ex fox;
6976     struct target_f_owner_ex *target_fox;
6977 #endif
6978     abi_long ret;
6979     int host_cmd = target_to_host_fcntl_cmd(cmd);
6980 
6981     if (host_cmd == -TARGET_EINVAL)
6982         return host_cmd;
6983 
6984     switch(cmd) {
6985     case TARGET_F_GETLK:
6986         ret = copy_from_user_flock(&fl64, arg);
6987         if (ret) {
6988             return ret;
6989         }
6990         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6991         if (ret == 0) {
6992             ret = copy_to_user_flock(arg, &fl64);
6993         }
6994         break;
6995 
6996     case TARGET_F_SETLK:
6997     case TARGET_F_SETLKW:
6998         ret = copy_from_user_flock(&fl64, arg);
6999         if (ret) {
7000             return ret;
7001         }
7002         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7003         break;
7004 
7005     case TARGET_F_GETLK64:
7006     case TARGET_F_OFD_GETLK:
7007         ret = copy_from_user_flock64(&fl64, arg);
7008         if (ret) {
7009             return ret;
7010         }
7011         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7012         if (ret == 0) {
7013             ret = copy_to_user_flock64(arg, &fl64);
7014         }
7015         break;
7016     case TARGET_F_SETLK64:
7017     case TARGET_F_SETLKW64:
7018     case TARGET_F_OFD_SETLK:
7019     case TARGET_F_OFD_SETLKW:
7020         ret = copy_from_user_flock64(&fl64, arg);
7021         if (ret) {
7022             return ret;
7023         }
7024         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7025         break;
7026 
7027     case TARGET_F_GETFL:
7028         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7029         if (ret >= 0) {
7030             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7031         }
7032         break;
7033 
7034     case TARGET_F_SETFL:
7035         ret = get_errno(safe_fcntl(fd, host_cmd,
7036                                    target_to_host_bitmask(arg,
7037                                                           fcntl_flags_tbl)));
7038         break;
7039 
7040 #ifdef F_GETOWN_EX
7041     case TARGET_F_GETOWN_EX:
7042         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7043         if (ret >= 0) {
7044             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7045                 return -TARGET_EFAULT;
7046             target_fox->type = tswap32(fox.type);
7047             target_fox->pid = tswap32(fox.pid);
7048             unlock_user_struct(target_fox, arg, 1);
7049         }
7050         break;
7051 #endif
7052 
7053 #ifdef F_SETOWN_EX
7054     case TARGET_F_SETOWN_EX:
7055         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7056             return -TARGET_EFAULT;
7057         fox.type = tswap32(target_fox->type);
7058         fox.pid = tswap32(target_fox->pid);
7059         unlock_user_struct(target_fox, arg, 0);
7060         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7061         break;
7062 #endif
7063 
7064     case TARGET_F_SETSIG:
7065         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7066         break;
7067 
7068     case TARGET_F_GETSIG:
7069         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7070         break;
7071 
7072     case TARGET_F_SETOWN:
7073     case TARGET_F_GETOWN:
7074     case TARGET_F_SETLEASE:
7075     case TARGET_F_GETLEASE:
7076     case TARGET_F_SETPIPE_SZ:
7077     case TARGET_F_GETPIPE_SZ:
7078     case TARGET_F_ADD_SEALS:
7079     case TARGET_F_GET_SEALS:
7080         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7081         break;
7082 
7083     default:
7084         ret = get_errno(safe_fcntl(fd, cmd, arg));
7085         break;
7086     }
7087     return ret;
7088 }
7089 
7090 #ifdef USE_UID16
7091 
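/* With 16-bit IDs, values above 65535 are clamped to the overflow ID 65534,
   and a 16-bit -1 is widened back to -1. */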
7092 static inline int high2lowuid(int uid)
7093 {
7094     if (uid > 65535)
7095         return 65534;
7096     else
7097         return uid;
7098 }
7099 
7100 static inline int high2lowgid(int gid)
7101 {
7102     if (gid > 65535)
7103         return 65534;
7104     else
7105         return gid;
7106 }
7107 
7108 static inline int low2highuid(int uid)
7109 {
7110     if ((int16_t)uid == -1)
7111         return -1;
7112     else
7113         return uid;
7114 }
7115 
7116 static inline int low2highgid(int gid)
7117 {
7118     if ((int16_t)gid == -1)
7119         return -1;
7120     else
7121         return gid;
7122 }
7123 static inline int tswapid(int id)
7124 {
7125     return tswap16(id);
7126 }
7127 
7128 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7129 
7130 #else /* !USE_UID16 */
7131 static inline int high2lowuid(int uid)
7132 {
7133     return uid;
7134 }
7135 static inline int high2lowgid(int gid)
7136 {
7137     return gid;
7138 }
7139 static inline int low2highuid(int uid)
7140 {
7141     return uid;
7142 }
7143 static inline int low2highgid(int gid)
7144 {
7145     return gid;
7146 }
7147 static inline int tswapid(int id)
7148 {
7149     return tswap32(id);
7150 }
7151 
7152 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7153 
7154 #endif /* USE_UID16 */
7155 
7156 /* We must do direct syscalls for setting UID/GID, because we want to
7157  * implement the Linux system call semantics of "change only for this thread",
7158  * not the libc/POSIX semantics of "change for all threads in process".
7159  * (See http://ewontfix.com/17/ for more details.)
7160  * We use the 32-bit version of the syscalls if present; if it is not
7161  * then either the host architecture supports 32-bit UIDs natively with
7162  * the standard syscall, or the 16-bit UID is the best we can do.
7163  */
7164 #ifdef __NR_setuid32
7165 #define __NR_sys_setuid __NR_setuid32
7166 #else
7167 #define __NR_sys_setuid __NR_setuid
7168 #endif
7169 #ifdef __NR_setgid32
7170 #define __NR_sys_setgid __NR_setgid32
7171 #else
7172 #define __NR_sys_setgid __NR_setgid
7173 #endif
7174 #ifdef __NR_setresuid32
7175 #define __NR_sys_setresuid __NR_setresuid32
7176 #else
7177 #define __NR_sys_setresuid __NR_setresuid
7178 #endif
7179 #ifdef __NR_setresgid32
7180 #define __NR_sys_setresgid __NR_setresgid32
7181 #else
7182 #define __NR_sys_setresgid __NR_setresgid
7183 #endif
7184 
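/* Direct syscall wrappers generated by the _syscallN() macros, so the
   libc set*id() wrappers (which change all threads) are bypassed. */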
7185 _syscall1(int, sys_setuid, uid_t, uid)
7186 _syscall1(int, sys_setgid, gid_t, gid)
7187 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7188 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7189 
7190 void syscall_init(void)
7191 {
7192     IOCTLEntry *ie;
7193     const argtype *arg_type;
7194     int size;
7195 
7196     thunk_init(STRUCT_MAX);
7197 
7198 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7199 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7200 #include "syscall_types.h"
7201 #undef STRUCT
7202 #undef STRUCT_SPECIAL
7203 
7204     /* we patch the ioctl size if necessary. We rely on the fact that
7205        no ioctl has all bits of its size field set to '1' */
7206     ie = ioctl_entries;
7207     while (ie->target_cmd != 0) {
7208         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7209             TARGET_IOC_SIZEMASK) {
7210             arg_type = ie->arg_type;
7211             if (arg_type[0] != TYPE_PTR) {
7212                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7213                         ie->target_cmd);
7214                 exit(1);
7215             }
7216             arg_type++;
7217             size = thunk_type_size(arg_type, 0);
7218             ie->target_cmd = (ie->target_cmd &
7219                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7220                 (size << TARGET_IOC_SIZESHIFT);
7221         }
7222 
7223         /* automatic consistency check if same arch */
7224 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7225     (defined(__x86_64__) && defined(TARGET_X86_64))
7226         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7227             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7228                     ie->name, ie->target_cmd, ie->host_cmd);
7229         }
7230 #endif
7231         ie++;
7232     }
7233 }
7234 
7235 #ifdef TARGET_NR_truncate64
7236 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7237                                          abi_long arg2,
7238                                          abi_long arg3,
7239                                          abi_long arg4)
7240 {
7241     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7242         arg2 = arg3;
7243         arg3 = arg4;
7244     }
7245     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7246 }
7247 #endif
7248 
7249 #ifdef TARGET_NR_ftruncate64
7250 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7251                                           abi_long arg2,
7252                                           abi_long arg3,
7253                                           abi_long arg4)
7254 {
7255     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7256         arg2 = arg3;
7257         arg3 = arg4;
7258     }
7259     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7260 }
7261 #endif
7262 
7263 #if defined(TARGET_NR_timer_settime) || \
7264     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7265 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7266                                                  abi_ulong target_addr)
7267 {
7268     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7269                                 offsetof(struct target_itimerspec,
7270                                          it_interval)) ||
7271         target_to_host_timespec(&host_its->it_value, target_addr +
7272                                 offsetof(struct target_itimerspec,
7273                                          it_value))) {
7274         return -TARGET_EFAULT;
7275     }
7276 
7277     return 0;
7278 }
7279 #endif
7280 
7281 #if defined(TARGET_NR_timer_settime64) || \
7282     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7283 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7284                                                    abi_ulong target_addr)
7285 {
7286     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7287                                   offsetof(struct target__kernel_itimerspec,
7288                                            it_interval)) ||
7289         target_to_host_timespec64(&host_its->it_value, target_addr +
7290                                   offsetof(struct target__kernel_itimerspec,
7291                                            it_value))) {
7292         return -TARGET_EFAULT;
7293     }
7294 
7295     return 0;
7296 }
7297 #endif
7298 
7299 #if ((defined(TARGET_NR_timerfd_gettime) || \
7300       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7301       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7302 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7303                                                  struct itimerspec *host_its)
7304 {
7305     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7306                                                        it_interval),
7307                                 &host_its->it_interval) ||
7308         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7309                                                        it_value),
7310                                 &host_its->it_value)) {
7311         return -TARGET_EFAULT;
7312     }
7313     return 0;
7314 }
7315 #endif
7316 
7317 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7318       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7319       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7320 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7321                                                    struct itimerspec *host_its)
7322 {
7323     if (host_to_target_timespec64(target_addr +
7324                                   offsetof(struct target__kernel_itimerspec,
7325                                            it_interval),
7326                                   &host_its->it_interval) ||
7327         host_to_target_timespec64(target_addr +
7328                                   offsetof(struct target__kernel_itimerspec,
7329                                            it_value),
7330                                   &host_its->it_value)) {
7331         return -TARGET_EFAULT;
7332     }
7333     return 0;
7334 }
7335 #endif
7336 
7337 #if defined(TARGET_NR_adjtimex) || \
7338     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7339 static inline abi_long target_to_host_timex(struct timex *host_tx,
7340                                             abi_long target_addr)
7341 {
7342     struct target_timex *target_tx;
7343 
7344     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7345         return -TARGET_EFAULT;
7346     }
7347 
7348     __get_user(host_tx->modes, &target_tx->modes);
7349     __get_user(host_tx->offset, &target_tx->offset);
7350     __get_user(host_tx->freq, &target_tx->freq);
7351     __get_user(host_tx->maxerror, &target_tx->maxerror);
7352     __get_user(host_tx->esterror, &target_tx->esterror);
7353     __get_user(host_tx->status, &target_tx->status);
7354     __get_user(host_tx->constant, &target_tx->constant);
7355     __get_user(host_tx->precision, &target_tx->precision);
7356     __get_user(host_tx->tolerance, &target_tx->tolerance);
7357     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7358     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7359     __get_user(host_tx->tick, &target_tx->tick);
7360     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7361     __get_user(host_tx->jitter, &target_tx->jitter);
7362     __get_user(host_tx->shift, &target_tx->shift);
7363     __get_user(host_tx->stabil, &target_tx->stabil);
7364     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7365     __get_user(host_tx->calcnt, &target_tx->calcnt);
7366     __get_user(host_tx->errcnt, &target_tx->errcnt);
7367     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7368     __get_user(host_tx->tai, &target_tx->tai);
7369 
7370     unlock_user_struct(target_tx, target_addr, 0);
7371     return 0;
7372 }
7373 
7374 static inline abi_long host_to_target_timex(abi_long target_addr,
7375                                             struct timex *host_tx)
7376 {
7377     struct target_timex *target_tx;
7378 
7379     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7380         return -TARGET_EFAULT;
7381     }
7382 
7383     __put_user(host_tx->modes, &target_tx->modes);
7384     __put_user(host_tx->offset, &target_tx->offset);
7385     __put_user(host_tx->freq, &target_tx->freq);
7386     __put_user(host_tx->maxerror, &target_tx->maxerror);
7387     __put_user(host_tx->esterror, &target_tx->esterror);
7388     __put_user(host_tx->status, &target_tx->status);
7389     __put_user(host_tx->constant, &target_tx->constant);
7390     __put_user(host_tx->precision, &target_tx->precision);
7391     __put_user(host_tx->tolerance, &target_tx->tolerance);
7392     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7393     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7394     __put_user(host_tx->tick, &target_tx->tick);
7395     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7396     __put_user(host_tx->jitter, &target_tx->jitter);
7397     __put_user(host_tx->shift, &target_tx->shift);
7398     __put_user(host_tx->stabil, &target_tx->stabil);
7399     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7400     __put_user(host_tx->calcnt, &target_tx->calcnt);
7401     __put_user(host_tx->errcnt, &target_tx->errcnt);
7402     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7403     __put_user(host_tx->tai, &target_tx->tai);
7404 
7405     unlock_user_struct(target_tx, target_addr, 1);
7406     return 0;
7407 }
7408 #endif
7409 
7410 
7411 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7412 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7413                                               abi_long target_addr)
7414 {
7415     struct target__kernel_timex *target_tx;
7416 
7417     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7418                                  offsetof(struct target__kernel_timex,
7419                                           time))) {
7420         return -TARGET_EFAULT;
7421     }
7422 
7423     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7424         return -TARGET_EFAULT;
7425     }
7426 
7427     __get_user(host_tx->modes, &target_tx->modes);
7428     __get_user(host_tx->offset, &target_tx->offset);
7429     __get_user(host_tx->freq, &target_tx->freq);
7430     __get_user(host_tx->maxerror, &target_tx->maxerror);
7431     __get_user(host_tx->esterror, &target_tx->esterror);
7432     __get_user(host_tx->status, &target_tx->status);
7433     __get_user(host_tx->constant, &target_tx->constant);
7434     __get_user(host_tx->precision, &target_tx->precision);
7435     __get_user(host_tx->tolerance, &target_tx->tolerance);
7436     __get_user(host_tx->tick, &target_tx->tick);
7437     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7438     __get_user(host_tx->jitter, &target_tx->jitter);
7439     __get_user(host_tx->shift, &target_tx->shift);
7440     __get_user(host_tx->stabil, &target_tx->stabil);
7441     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7442     __get_user(host_tx->calcnt, &target_tx->calcnt);
7443     __get_user(host_tx->errcnt, &target_tx->errcnt);
7444     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7445     __get_user(host_tx->tai, &target_tx->tai);
7446 
7447     unlock_user_struct(target_tx, target_addr, 0);
7448     return 0;
7449 }
7450 
7451 static inline abi_long host_to_target_timex64(abi_long target_addr,
7452                                               struct timex *host_tx)
7453 {
7454     struct target__kernel_timex *target_tx;
7455 
7456     if (copy_to_user_timeval64(target_addr +
7457                                offsetof(struct target__kernel_timex, time),
7458                                &host_tx->time)) {
7459         return -TARGET_EFAULT;
7460     }
7461 
7462     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7463         return -TARGET_EFAULT;
7464     }
7465 
7466     __put_user(host_tx->modes, &target_tx->modes);
7467     __put_user(host_tx->offset, &target_tx->offset);
7468     __put_user(host_tx->freq, &target_tx->freq);
7469     __put_user(host_tx->maxerror, &target_tx->maxerror);
7470     __put_user(host_tx->esterror, &target_tx->esterror);
7471     __put_user(host_tx->status, &target_tx->status);
7472     __put_user(host_tx->constant, &target_tx->constant);
7473     __put_user(host_tx->precision, &target_tx->precision);
7474     __put_user(host_tx->tolerance, &target_tx->tolerance);
7475     __put_user(host_tx->tick, &target_tx->tick);
7476     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7477     __put_user(host_tx->jitter, &target_tx->jitter);
7478     __put_user(host_tx->shift, &target_tx->shift);
7479     __put_user(host_tx->stabil, &target_tx->stabil);
7480     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7481     __put_user(host_tx->calcnt, &target_tx->calcnt);
7482     __put_user(host_tx->errcnt, &target_tx->errcnt);
7483     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7484     __put_user(host_tx->tai, &target_tx->tai);
7485 
7486     unlock_user_struct(target_tx, target_addr, 1);
7487     return 0;
7488 }
7489 #endif
7490 
7491 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7492 #define sigev_notify_thread_id _sigev_un._tid
7493 #endif
7494 
7495 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7496                                                abi_ulong target_addr)
7497 {
7498     struct target_sigevent *target_sevp;
7499 
7500     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7501         return -TARGET_EFAULT;
7502     }
7503 
7504     /* This union is awkward on 64 bit systems because it has a 32 bit
7505      * integer and a pointer in it; we follow the conversion approach
7506      * used for handling sigval types in signal.c so the guest should get
7507      * the correct value back even if we did a 64 bit byteswap and it's
7508      * using the 32 bit integer.
7509      */
7510     host_sevp->sigev_value.sival_ptr =
7511         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7512     host_sevp->sigev_signo =
7513         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7514     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7515     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7516 
7517     unlock_user_struct(target_sevp, target_addr, 1);
7518     return 0;
7519 }
7520 
7521 #if defined(TARGET_NR_mlockall)
7522 static inline int target_to_host_mlockall_arg(int arg)
7523 {
7524     int result = 0;
7525 
7526     if (arg & TARGET_MCL_CURRENT) {
7527         result |= MCL_CURRENT;
7528     }
7529     if (arg & TARGET_MCL_FUTURE) {
7530         result |= MCL_FUTURE;
7531     }
7532 #ifdef MCL_ONFAULT
7533     if (arg & TARGET_MCL_ONFAULT) {
7534         result |= MCL_ONFAULT;
7535     }
7536 #endif
7537 
7538     return result;
7539 }
7540 #endif
7541 
7542 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7543      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7544      defined(TARGET_NR_newfstatat))
7545 static inline abi_long host_to_target_stat64(void *cpu_env,
7546                                              abi_ulong target_addr,
7547                                              struct stat *host_st)
7548 {
7549 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7550     if (((CPUARMState *)cpu_env)->eabi) {
7551         struct target_eabi_stat64 *target_st;
7552 
7553         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7554             return -TARGET_EFAULT;
7555         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7556         __put_user(host_st->st_dev, &target_st->st_dev);
7557         __put_user(host_st->st_ino, &target_st->st_ino);
7558 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7559         __put_user(host_st->st_ino, &target_st->__st_ino);
7560 #endif
7561         __put_user(host_st->st_mode, &target_st->st_mode);
7562         __put_user(host_st->st_nlink, &target_st->st_nlink);
7563         __put_user(host_st->st_uid, &target_st->st_uid);
7564         __put_user(host_st->st_gid, &target_st->st_gid);
7565         __put_user(host_st->st_rdev, &target_st->st_rdev);
7566         __put_user(host_st->st_size, &target_st->st_size);
7567         __put_user(host_st->st_blksize, &target_st->st_blksize);
7568         __put_user(host_st->st_blocks, &target_st->st_blocks);
7569         __put_user(host_st->st_atime, &target_st->target_st_atime);
7570         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7571         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7572 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7573         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7574         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7575         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7576 #endif
7577         unlock_user_struct(target_st, target_addr, 1);
7578     } else
7579 #endif
7580     {
7581 #if defined(TARGET_HAS_STRUCT_STAT64)
7582         struct target_stat64 *target_st;
7583 #else
7584         struct target_stat *target_st;
7585 #endif
7586 
7587         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7588             return -TARGET_EFAULT;
7589         memset(target_st, 0, sizeof(*target_st));
7590         __put_user(host_st->st_dev, &target_st->st_dev);
7591         __put_user(host_st->st_ino, &target_st->st_ino);
7592 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7593         __put_user(host_st->st_ino, &target_st->__st_ino);
7594 #endif
7595         __put_user(host_st->st_mode, &target_st->st_mode);
7596         __put_user(host_st->st_nlink, &target_st->st_nlink);
7597         __put_user(host_st->st_uid, &target_st->st_uid);
7598         __put_user(host_st->st_gid, &target_st->st_gid);
7599         __put_user(host_st->st_rdev, &target_st->st_rdev);
7600         /* XXX: better use of kernel struct */
7601         __put_user(host_st->st_size, &target_st->st_size);
7602         __put_user(host_st->st_blksize, &target_st->st_blksize);
7603         __put_user(host_st->st_blocks, &target_st->st_blocks);
7604         __put_user(host_st->st_atime, &target_st->target_st_atime);
7605         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7606         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7607 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7608         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7609         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7610         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7611 #endif
7612         unlock_user_struct(target_st, target_addr, 1);
7613     }
7614 
7615     return 0;
7616 }
7617 #endif
7618 
7619 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7620 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7621                                             abi_ulong target_addr)
7622 {
7623     struct target_statx *target_stx;
7624 
7625     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7626         return -TARGET_EFAULT;
7627     }
7628     memset(target_stx, 0, sizeof(*target_stx));
7629 
7630     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7631     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7632     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7633     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7634     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7635     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7636     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7637     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7638     __put_user(host_stx->stx_size, &target_stx->stx_size);
7639     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7640     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7641     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7642     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7643     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7644     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7645     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7646     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7647     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7648     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7649     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7650     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7651     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7652     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7653 
7654     unlock_user_struct(target_stx, target_addr, 1);
7655 
7656     return 0;
7657 }
7658 #endif
7659 
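     /*
      * Issue a raw host futex call, selecting between __NR_futex and
      * __NR_futex_time64 based on HOST_LONG_BITS and the width of the
      * host time_t.
      */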
7660 static int do_sys_futex(int *uaddr, int op, int val,
7661                          const struct timespec *timeout, int *uaddr2,
7662                          int val3)
7663 {
7664 #if HOST_LONG_BITS == 64
7665 #if defined(__NR_futex)
7666     /* A 64-bit host always has a 64-bit time_t; there is no _time64 variant. */
7667     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7668 
7669 #endif
7670 #else /* HOST_LONG_BITS == 64 */
7671 #if defined(__NR_futex_time64)
7672     if (sizeof(timeout->tv_sec) == 8) {
7673         /* _time64 function on 32bit arch */
7674         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7675     }
7676 #endif
7677 #if defined(__NR_futex)
7678     /* old function on 32bit arch */
7679     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7680 #endif
7681 #endif /* HOST_LONG_BITS == 64 */
7682     g_assert_not_reached();
7683 }
7684 
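     /*
      * As do_sys_futex(), but issued through the safe_futex()/safe_futex_time64()
      * wrappers so that pending signals are handled safely around the blocking
      * call, with the result converted via get_errno().
      */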
7685 static int do_safe_futex(int *uaddr, int op, int val,
7686                          const struct timespec *timeout, int *uaddr2,
7687                          int val3)
7688 {
7689 #if HOST_LONG_BITS == 64
7690 #if defined(__NR_futex)
7691     /* A 64-bit host always has a 64-bit time_t; there is no _time64 variant. */
7692     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7693 #endif
7694 #else /* HOST_LONG_BITS == 64 */
7695 #if defined(__NR_futex_time64)
7696     if (sizeof(timeout->tv_sec) == 8) {
7697         /* _time64 function on 32bit arch */
7698         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7699                                            val3));
7700     }
7701 #endif
7702 #if defined(__NR_futex)
7703     /* old function on 32bit arch */
7704     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7705 #endif
7706 #endif /* HOST_LONG_BITS == 64 */
7707     return -TARGET_ENOSYS;
7708 }
7709 
7710 /* ??? Using host futex calls even when target atomic operations
7711    are not really atomic probably breaks things.  However, implementing
7712    futexes locally would make futexes shared between multiple processes
7713    tricky, and they are probably useless anyway because guest atomic
7714    operations won't work either.  */
7715 #if defined(TARGET_NR_futex)
7716 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7717                     target_ulong timeout, target_ulong uaddr2, int val3)
7718 {
7719     struct timespec ts, *pts;
7720     int base_op;
7721 
7722     /* ??? We assume FUTEX_* constants are the same on both host
7723        and target.  */
7724 #ifdef FUTEX_CMD_MASK
7725     base_op = op & FUTEX_CMD_MASK;
7726 #else
7727     base_op = op;
7728 #endif
7729     switch (base_op) {
7730     case FUTEX_WAIT:
7731     case FUTEX_WAIT_BITSET:
7732         if (timeout) {
7733             pts = &ts;
7734             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7735         } else {
7736             pts = NULL;
7737         }
7738         return do_safe_futex(g2h(cpu, uaddr),
7739                              op, tswap32(val), pts, NULL, val3);
7740     case FUTEX_WAKE:
7741         return do_safe_futex(g2h(cpu, uaddr),
7742                              op, val, NULL, NULL, 0);
7743     case FUTEX_FD:
7744         return do_safe_futex(g2h(cpu, uaddr),
7745                              op, val, NULL, NULL, 0);
7746     case FUTEX_REQUEUE:
7747     case FUTEX_CMP_REQUEUE:
7748     case FUTEX_WAKE_OP:
7749         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7750            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7751            But the prototype takes a `struct timespec *'; insert casts
7752            to satisfy the compiler.  We do not need to tswap TIMEOUT
7753            since it's not compared to guest memory.  */
7754         pts = (struct timespec *)(uintptr_t) timeout;
7755         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7756                              (base_op == FUTEX_CMP_REQUEUE
7757                               ? tswap32(val3) : val3));
7758     default:
7759         return -TARGET_ENOSYS;
7760     }
7761 }
7762 #endif
7763 
7764 #if defined(TARGET_NR_futex_time64)
7765 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7766                            int val, target_ulong timeout,
7767                            target_ulong uaddr2, int val3)
7768 {
7769     struct timespec ts, *pts;
7770     int base_op;
7771 
7772     /* ??? We assume FUTEX_* constants are the same on both host
7773        and target.  */
7774 #ifdef FUTEX_CMD_MASK
7775     base_op = op & FUTEX_CMD_MASK;
7776 #else
7777     base_op = op;
7778 #endif
7779     switch (base_op) {
7780     case FUTEX_WAIT:
7781     case FUTEX_WAIT_BITSET:
7782         if (timeout) {
7783             pts = &ts;
7784             if (target_to_host_timespec64(pts, timeout)) {
7785                 return -TARGET_EFAULT;
7786             }
7787         } else {
7788             pts = NULL;
7789         }
7790         return do_safe_futex(g2h(cpu, uaddr), op,
7791                              tswap32(val), pts, NULL, val3);
7792     case FUTEX_WAKE:
7793         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7794     case FUTEX_FD:
7795         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7796     case FUTEX_REQUEUE:
7797     case FUTEX_CMP_REQUEUE:
7798     case FUTEX_WAKE_OP:
7799         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7800            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7801            But the prototype takes a `struct timespec *'; insert casts
7802            to satisfy the compiler.  We do not need to tswap TIMEOUT
7803            since it's not compared to guest memory.  */
7804         pts = (struct timespec *)(uintptr_t) timeout;
7805         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7806                              (base_op == FUTEX_CMP_REQUEUE
7807                               ? tswap32(val3) : val3));
7808     default:
7809         return -TARGET_ENOSYS;
7810     }
7811 }
7812 #endif
7813 
7814 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
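     /*
      * Emulate name_to_handle_at(): read handle_bytes from the guest
      * file_handle, call the host syscall with a scratch handle, then copy
      * the resulting (opaque) handle back to the guest with its header
      * fields byte-swapped.
      */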
7815 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7816                                      abi_long handle, abi_long mount_id,
7817                                      abi_long flags)
7818 {
7819     struct file_handle *target_fh;
7820     struct file_handle *fh;
7821     int mid = 0;
7822     abi_long ret;
7823     char *name;
7824     unsigned int size, total_size;
7825 
7826     if (get_user_s32(size, handle)) {
7827         return -TARGET_EFAULT;
7828     }
7829 
7830     name = lock_user_string(pathname);
7831     if (!name) {
7832         return -TARGET_EFAULT;
7833     }
7834 
7835     total_size = sizeof(struct file_handle) + size;
7836     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7837     if (!target_fh) {
7838         unlock_user(name, pathname, 0);
7839         return -TARGET_EFAULT;
7840     }
7841 
7842     fh = g_malloc0(total_size);
7843     fh->handle_bytes = size;
7844 
7845     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7846     unlock_user(name, pathname, 0);
7847 
7848     /* man name_to_handle_at(2):
7849      * Other than the use of the handle_bytes field, the caller should treat
7850      * the file_handle structure as an opaque data type
7851      */
7852 
7853     memcpy(target_fh, fh, total_size);
7854     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7855     target_fh->handle_type = tswap32(fh->handle_type);
7856     g_free(fh);
7857     unlock_user(target_fh, handle, total_size);
7858 
7859     if (put_user_s32(mid, mount_id)) {
7860         return -TARGET_EFAULT;
7861     }
7862 
7863     return ret;
7864 
7865 }
7866 #endif
7867 
7868 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
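     /*
      * Emulate open_by_handle_at(): copy the guest file_handle into a host
      * buffer, fixing up the header fields, and open it with host open flags
      * translated from the guest flags.
      */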
7869 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7870                                      abi_long flags)
7871 {
7872     struct file_handle *target_fh;
7873     struct file_handle *fh;
7874     unsigned int size, total_size;
7875     abi_long ret;
7876 
7877     if (get_user_s32(size, handle)) {
7878         return -TARGET_EFAULT;
7879     }
7880 
7881     total_size = sizeof(struct file_handle) + size;
7882     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7883     if (!target_fh) {
7884         return -TARGET_EFAULT;
7885     }
7886 
7887     fh = g_memdup(target_fh, total_size);
7888     fh->handle_bytes = size;
7889     fh->handle_type = tswap32(target_fh->handle_type);
7890 
7891     ret = get_errno(open_by_handle_at(mount_fd, fh,
7892                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7893 
7894     g_free(fh);
7895 
7896     unlock_user(target_fh, handle, total_size);
7897 
7898     return ret;
7899 }
7900 #endif
7901 
7902 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7903 
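     /*
      * Common helper for signalfd and signalfd4: convert the guest signal mask
      * and flags to their host equivalents, create the signalfd, and register
      * an fd translator so data read from it is converted back for the guest.
      */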
7904 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7905 {
7906     int host_flags;
7907     target_sigset_t *target_mask;
7908     sigset_t host_mask;
7909     abi_long ret;
7910 
7911     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7912         return -TARGET_EINVAL;
7913     }
7914     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     target_to_host_sigset(&host_mask, target_mask);
7919 
7920     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7921 
7922     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7923     if (ret >= 0) {
7924         fd_trans_register(ret, &target_signalfd_trans);
7925     }
7926 
7927     unlock_user_struct(target_mask, mask, 0);
7928 
7929     return ret;
7930 }
7931 #endif
7932 
7933 /* Map host to target signal numbers for the wait family of syscalls.
7934    Assume all other status bits are the same.  */
7935 int host_to_target_waitstatus(int status)
7936 {
7937     if (WIFSIGNALED(status)) {
7938         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7939     }
7940     if (WIFSTOPPED(status)) {
7941         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7942                | (status & 0xff);
7943     }
7944     return status;
7945 }
7946 
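     /* Fake /proc/self/cmdline by writing out the guest's argv[] strings,
        including their terminating NUL bytes. */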
7947 static int open_self_cmdline(void *cpu_env, int fd)
7948 {
7949     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7950     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7951     int i;
7952 
7953     for (i = 0; i < bprm->argc; i++) {
7954         size_t len = strlen(bprm->argv[i]) + 1;
7955 
7956         if (write(fd, bprm->argv[i], len) != len) {
7957             return -1;
7958         }
7959     }
7960 
7961     return 0;
7962 }
7963 
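     /*
      * Fake /proc/self/maps: walk the host's own mappings and emit only the
      * ranges that correspond to valid guest addresses, translated back into
      * guest addresses and guest page protections.
      */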
7964 static int open_self_maps(void *cpu_env, int fd)
7965 {
7966     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7967     TaskState *ts = cpu->opaque;
7968     GSList *map_info = read_self_maps();
7969     GSList *s;
7970     int count;
7971 
7972     for (s = map_info; s; s = g_slist_next(s)) {
7973         MapInfo *e = (MapInfo *) s->data;
7974 
7975         if (h2g_valid(e->start)) {
7976             unsigned long min = e->start;
7977             unsigned long max = e->end;
7978             int flags = page_get_flags(h2g(min));
7979             const char *path;
7980 
7981             max = h2g_valid(max - 1) ?
7982                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7983 
7984             if (page_check_range(h2g(min), max - min, flags) == -1) {
7985                 continue;
7986             }
7987 
7988             if (h2g(min) == ts->info->stack_limit) {
7989                 path = "[stack]";
7990             } else {
7991                 path = e->path;
7992             }
7993 
7994             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7995                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7996                             h2g(min), h2g(max - 1) + 1,
7997                             (flags & PAGE_READ) ? 'r' : '-',
7998                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7999                             (flags & PAGE_EXEC) ? 'x' : '-',
8000                             e->is_priv ? 'p' : '-',
8001                             (uint64_t) e->offset, e->dev, e->inode);
8002             if (path) {
8003                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8004             } else {
8005                 dprintf(fd, "\n");
8006             }
8007         }
8008     }
8009 
8010     free_self_maps(map_info);
8011 
8012 #ifdef TARGET_VSYSCALL_PAGE
8013     /*
8014      * We only support execution from the vsyscall page.
8015      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8016      */
8017     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8018                     " --xp 00000000 00:00 0",
8019                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8020     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8021 #endif
8022 
8023     return 0;
8024 }
8025 
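     /*
      * Fake /proc/self/stat: emit real values for the pid, command name,
      * ppid and stack fields, and zeroes for everything else.
      */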
8026 static int open_self_stat(void *cpu_env, int fd)
8027 {
8028     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8029     TaskState *ts = cpu->opaque;
8030     g_autoptr(GString) buf = g_string_new(NULL);
8031     int i;
8032 
8033     for (i = 0; i < 44; i++) {
8034         if (i == 0) {
8035             /* pid */
8036             g_string_printf(buf, FMT_pid " ", getpid());
8037         } else if (i == 1) {
8038             /* app name */
8039             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8040             bin = bin ? bin + 1 : ts->bprm->argv[0];
8041             g_string_printf(buf, "(%.15s) ", bin);
8042         } else if (i == 3) {
8043             /* ppid */
8044             g_string_printf(buf, FMT_pid " ", getppid());
8045         } else if (i == 27) {
8046             /* stack bottom */
8047             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8048         } else {
8049             /* for the rest, there is MasterCard */
8050             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8051         }
8052 
8053         if (write(fd, buf->str, buf->len) != buf->len) {
8054             return -1;
8055         }
8056     }
8057 
8058     return 0;
8059 }
8060 
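     /* Fake /proc/self/auxv by copying the auxiliary vector saved on the
        guest stack out to the given file descriptor. */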
8061 static int open_self_auxv(void *cpu_env, int fd)
8062 {
8063     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8064     TaskState *ts = cpu->opaque;
8065     abi_ulong auxv = ts->info->saved_auxv;
8066     abi_ulong len = ts->info->auxv_len;
8067     char *ptr;
8068 
8069     /*
8070      * The auxiliary vector is stored on the target process's stack;
8071      * read the whole auxv vector and copy it out to the file.
8072      */
8073     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8074     if (ptr != NULL) {
8075         while (len > 0) {
8076             ssize_t r;
8077             r = write(fd, ptr, len);
8078             if (r <= 0) {
8079                 break;
8080             }
8081             len -= r;
8082             ptr += r;
8083         }
8084         lseek(fd, 0, SEEK_SET);
8085         unlock_user(ptr, auxv, len);
8086     }
8087 
8088     return 0;
8089 }
8090 
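     /* Return 1 if filename names the given entry under /proc/self/ or
        /proc/<pid>/ for our own pid, 0 otherwise. */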
8091 static int is_proc_myself(const char *filename, const char *entry)
8092 {
8093     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8094         filename += strlen("/proc/");
8095         if (!strncmp(filename, "self/", strlen("self/"))) {
8096             filename += strlen("self/");
8097         } else if (*filename >= '1' && *filename <= '9') {
8098             char myself[80];
8099             snprintf(myself, sizeof(myself), "%d/", getpid());
8100             if (!strncmp(filename, myself, strlen(myself))) {
8101                 filename += strlen(myself);
8102             } else {
8103                 return 0;
8104             }
8105         } else {
8106             return 0;
8107         }
8108         if (!strcmp(filename, entry)) {
8109             return 1;
8110         }
8111     }
8112     return 0;
8113 }
8114 
8115 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8116     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8117 static int is_proc(const char *filename, const char *entry)
8118 {
8119     return strcmp(filename, entry) == 0;
8120 }
8121 #endif
8122 
8123 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
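     /* Fake /proc/net/route for cross-endian guests: copy the host's routing
        table, byte-swapping the address fields. */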
8124 static int open_net_route(void *cpu_env, int fd)
8125 {
8126     FILE *fp;
8127     char *line = NULL;
8128     size_t len = 0;
8129     ssize_t read;
8130 
8131     fp = fopen("/proc/net/route", "r");
8132     if (fp == NULL) {
8133         return -1;
8134     }
8135 
8136     /* read header */
8137 
8138     read = getline(&line, &len, fp);
8139     dprintf(fd, "%s", line);
8140 
8141     /* read routes */
8142 
8143     while ((read = getline(&line, &len, fp)) != -1) {
8144         char iface[16];
8145         uint32_t dest, gw, mask;
8146         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8147         int fields;
8148 
8149         fields = sscanf(line,
8150                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8151                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8152                         &mask, &mtu, &window, &irtt);
8153         if (fields != 11) {
8154             continue;
8155         }
8156         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8157                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8158                 metric, tswap32(mask), mtu, window, irtt);
8159     }
8160 
8161     free(line);
8162     fclose(fp);
8163 
8164     return 0;
8165 }
8166 #endif
8167 
8168 #if defined(TARGET_SPARC)
8169 static int open_cpuinfo(void *cpu_env, int fd)
8170 {
8171     dprintf(fd, "type\t\t: sun4u\n");
8172     return 0;
8173 }
8174 #endif
8175 
8176 #if defined(TARGET_HPPA)
8177 static int open_cpuinfo(void *cpu_env, int fd)
8178 {
8179     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8180     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8181     dprintf(fd, "capabilities\t: os32\n");
8182     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8183     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8184     return 0;
8185 }
8186 #endif
8187 
8188 #if defined(TARGET_M68K)
8189 static int open_hardware(void *cpu_env, int fd)
8190 {
8191     dprintf(fd, "Model:\t\tqemu-m68k\n");
8192     return 0;
8193 }
8194 #endif
8195 
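     /*
      * openat() with special handling for guest /proc files: /proc/self/exe
      * is redirected to the emulated binary, and entries such as
      * /proc/self/maps are synthesized into a temporary file by the matching
      * fake_open handler.
      */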
8196 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8197 {
8198     struct fake_open {
8199         const char *filename;
8200         int (*fill)(void *cpu_env, int fd);
8201         int (*cmp)(const char *s1, const char *s2);
8202     };
8203     const struct fake_open *fake_open;
8204     static const struct fake_open fakes[] = {
8205         { "maps", open_self_maps, is_proc_myself },
8206         { "stat", open_self_stat, is_proc_myself },
8207         { "auxv", open_self_auxv, is_proc_myself },
8208         { "cmdline", open_self_cmdline, is_proc_myself },
8209 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8210         { "/proc/net/route", open_net_route, is_proc },
8211 #endif
8212 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8213         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8214 #endif
8215 #if defined(TARGET_M68K)
8216         { "/proc/hardware", open_hardware, is_proc },
8217 #endif
8218         { NULL, NULL, NULL }
8219     };
8220 
8221     if (is_proc_myself(pathname, "exe")) {
8222         int execfd = qemu_getauxval(AT_EXECFD);
8223         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8224     }
8225 
8226     for (fake_open = fakes; fake_open->filename; fake_open++) {
8227         if (fake_open->cmp(pathname, fake_open->filename)) {
8228             break;
8229         }
8230     }
8231 
8232     if (fake_open->filename) {
8233         const char *tmpdir;
8234         char filename[PATH_MAX];
8235         int fd, r;
8236 
8237         /* create a temporary file to hold the faked /proc contents */
8238         tmpdir = getenv("TMPDIR");
8239         if (!tmpdir)
8240             tmpdir = "/tmp";
8241         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8242         fd = mkstemp(filename);
8243         if (fd < 0) {
8244             return fd;
8245         }
8246         unlink(filename);
8247 
8248         if ((r = fake_open->fill(cpu_env, fd))) {
8249             int e = errno;
8250             close(fd);
8251             errno = e;
8252             return r;
8253         }
8254         lseek(fd, 0, SEEK_SET);
8255 
8256         return fd;
8257     }
8258 
8259     return safe_openat(dirfd, path(pathname), flags, mode);
8260 }
8261 
8262 #define TIMER_MAGIC 0x0caf0000
8263 #define TIMER_MAGIC_MASK 0xffff0000
8264 
8265 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8266 static target_timer_t get_timer_id(abi_long arg)
8267 {
8268     target_timer_t timerid = arg;
8269 
8270     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8271         return -TARGET_EINVAL;
8272     }
8273 
8274     timerid &= 0xffff;
8275 
8276     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8277         return -TARGET_EINVAL;
8278     }
8279 
8280     return timerid;
8281 }
8282 
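     /* Convert a guest CPU affinity mask (an array of abi_ulong bit words) at
        target_addr into the host's unsigned long bitmap in host_mask. */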
8283 static int target_to_host_cpu_mask(unsigned long *host_mask,
8284                                    size_t host_size,
8285                                    abi_ulong target_addr,
8286                                    size_t target_size)
8287 {
8288     unsigned target_bits = sizeof(abi_ulong) * 8;
8289     unsigned host_bits = sizeof(*host_mask) * 8;
8290     abi_ulong *target_mask;
8291     unsigned i, j;
8292 
8293     assert(host_size >= target_size);
8294 
8295     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8296     if (!target_mask) {
8297         return -TARGET_EFAULT;
8298     }
8299     memset(host_mask, 0, host_size);
8300 
8301     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8302         unsigned bit = i * target_bits;
8303         abi_ulong val;
8304 
8305         __get_user(val, &target_mask[i]);
8306         for (j = 0; j < target_bits; j++, bit++) {
8307             if (val & (1UL << j)) {
8308                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8309             }
8310         }
8311     }
8312 
8313     unlock_user(target_mask, target_addr, 0);
8314     return 0;
8315 }
8316 
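     /* The inverse of target_to_host_cpu_mask(): copy a host CPU affinity
        bitmap back out to the guest representation at target_addr. */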
8317 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8318                                    size_t host_size,
8319                                    abi_ulong target_addr,
8320                                    size_t target_size)
8321 {
8322     unsigned target_bits = sizeof(abi_ulong) * 8;
8323     unsigned host_bits = sizeof(*host_mask) * 8;
8324     abi_ulong *target_mask;
8325     unsigned i, j;
8326 
8327     assert(host_size >= target_size);
8328 
8329     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8330     if (!target_mask) {
8331         return -TARGET_EFAULT;
8332     }
8333 
8334     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8335         unsigned bit = i * target_bits;
8336         abi_ulong val = 0;
8337 
8338         for (j = 0; j < target_bits; j++, bit++) {
8339             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8340                 val |= 1UL << j;
8341             }
8342         }
8343         __put_user(val, &target_mask[i]);
8344     }
8345 
8346     unlock_user(target_mask, target_addr, target_size);
8347     return 0;
8348 }
8349 
8350 #ifdef TARGET_NR_getdents
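     /*
      * Emulate getdents: read host directory entries into a bounce buffer and
      * repack them one record at a time into the guest dirent layout,
      * rewinding the directory offset to the first unreturned record if the
      * guest buffer fills up before the host one.
      */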
8351 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8352 {
8353     g_autofree void *hdirp = NULL;
8354     void *tdirp;
8355     int hlen, hoff, toff;
8356     int hreclen, treclen;
8357     off64_t prev_diroff = 0;
8358 
8359     hdirp = g_try_malloc(count);
8360     if (!hdirp) {
8361         return -TARGET_ENOMEM;
8362     }
8363 
8364 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8365     hlen = sys_getdents(dirfd, hdirp, count);
8366 #else
8367     hlen = sys_getdents64(dirfd, hdirp, count);
8368 #endif
8369 
8370     hlen = get_errno(hlen);
8371     if (is_error(hlen)) {
8372         return hlen;
8373     }
8374 
8375     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8376     if (!tdirp) {
8377         return -TARGET_EFAULT;
8378     }
8379 
8380     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8381 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8382         struct linux_dirent *hde = hdirp + hoff;
8383 #else
8384         struct linux_dirent64 *hde = hdirp + hoff;
8385 #endif
8386         struct target_dirent *tde = tdirp + toff;
8387         int namelen;
8388         uint8_t type;
8389 
8390         namelen = strlen(hde->d_name);
8391         hreclen = hde->d_reclen;
8392         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8393         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8394 
8395         if (toff + treclen > count) {
8396             /*
8397              * If the host struct is smaller than the target struct, or
8398              * requires less alignment and thus packs into less space,
8399              * then the host can return more entries than we can pass
8400              * on to the guest.
8401              */
8402             if (toff == 0) {
8403                 toff = -TARGET_EINVAL; /* result buffer is too small */
8404                 break;
8405             }
8406             /*
8407              * Return what we have, resetting the file pointer to the
8408              * location of the first record not returned.
8409              */
8410             lseek64(dirfd, prev_diroff, SEEK_SET);
8411             break;
8412         }
8413 
8414         prev_diroff = hde->d_off;
8415         tde->d_ino = tswapal(hde->d_ino);
8416         tde->d_off = tswapal(hde->d_off);
8417         tde->d_reclen = tswap16(treclen);
8418         memcpy(tde->d_name, hde->d_name, namelen + 1);
8419 
8420         /*
8421          * The getdents type is in what was formerly a padding byte at the
8422          * end of the structure.
8423          */
8424 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8425         type = *((uint8_t *)hde + hreclen - 1);
8426 #else
8427         type = hde->d_type;
8428 #endif
8429         *((uint8_t *)tde + treclen - 1) = type;
8430     }
8431 
8432     unlock_user(tdirp, arg2, toff);
8433     return toff;
8434 }
8435 #endif /* TARGET_NR_getdents */
8436 
8437 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
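     /*
      * Emulate getdents64: like do_getdents() above, but repacking the host
      * entries into the guest's 64-bit dirent64 layout.
      */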
8438 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8439 {
8440     g_autofree void *hdirp = NULL;
8441     void *tdirp;
8442     int hlen, hoff, toff;
8443     int hreclen, treclen;
8444     off64_t prev_diroff = 0;
8445 
8446     hdirp = g_try_malloc(count);
8447     if (!hdirp) {
8448         return -TARGET_ENOMEM;
8449     }
8450 
8451     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8452     if (is_error(hlen)) {
8453         return hlen;
8454     }
8455 
8456     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8457     if (!tdirp) {
8458         return -TARGET_EFAULT;
8459     }
8460 
8461     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8462         struct linux_dirent64 *hde = hdirp + hoff;
8463         struct target_dirent64 *tde = tdirp + toff;
8464         int namelen;
8465 
8466         namelen = strlen(hde->d_name) + 1;
8467         hreclen = hde->d_reclen;
8468         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8469         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8470 
8471         if (toff + treclen > count) {
8472             /*
8473              * If the host struct is smaller than the target struct, or
8474              * requires less alignment and thus packs into less space,
8475              * then the host can return more entries than we can pass
8476              * on to the guest.
8477              */
8478             if (toff == 0) {
8479                 toff = -TARGET_EINVAL; /* result buffer is too small */
8480                 break;
8481             }
8482             /*
8483              * Return what we have, resetting the file pointer to the
8484              * location of the first record not returned.
8485              */
8486             lseek64(dirfd, prev_diroff, SEEK_SET);
8487             break;
8488         }
8489 
8490         prev_diroff = hde->d_off;
8491         tde->d_ino = tswap64(hde->d_ino);
8492         tde->d_off = tswap64(hde->d_off);
8493         tde->d_reclen = tswap16(treclen);
8494         tde->d_type = hde->d_type;
8495         memcpy(tde->d_name, hde->d_name, namelen);
8496     }
8497 
8498     unlock_user(tdirp, arg2, toff);
8499     return toff;
8500 }
8501 #endif /* TARGET_NR_getdents64 */
8502 
8503 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8504 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8505 #endif
8506 
8507 /* This is an internal helper for do_syscall so that it is easier
8508  * to have a single return point, allowing actions such as logging
8509  * of syscall results to be performed.
8510  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8511  */
8512 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8513                             abi_long arg2, abi_long arg3, abi_long arg4,
8514                             abi_long arg5, abi_long arg6, abi_long arg7,
8515                             abi_long arg8)
8516 {
8517     CPUState *cpu = env_cpu(cpu_env);
8518     abi_long ret;
8519 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8520     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8521     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8522     || defined(TARGET_NR_statx)
8523     struct stat st;
8524 #endif
8525 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8526     || defined(TARGET_NR_fstatfs)
8527     struct statfs stfs;
8528 #endif
8529     void *p;
8530 
8531     switch(num) {
8532     case TARGET_NR_exit:
8533         /* In old applications this may be used to implement _exit(2).
8534            However, in threaded applications it is used for thread termination,
8535            and _exit_group is used for application termination.
8536            Do thread termination if we have more than one thread.  */
8537 
8538         if (block_signals()) {
8539             return -QEMU_ERESTARTSYS;
8540         }
8541 
8542         pthread_mutex_lock(&clone_lock);
8543 
8544         if (CPU_NEXT(first_cpu)) {
8545             TaskState *ts = cpu->opaque;
8546 
8547             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8548             object_unref(OBJECT(cpu));
8549             /*
8550              * At this point the CPU should be unrealized and removed
8551              * from cpu lists. We can clean-up the rest of the thread
8552              * data without the lock held.
8553              */
8554 
8555             pthread_mutex_unlock(&clone_lock);
8556 
8557             if (ts->child_tidptr) {
8558                 put_user_u32(0, ts->child_tidptr);
8559                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8560                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8561             }
8562             thread_cpu = NULL;
8563             g_free(ts);
8564             rcu_unregister_thread();
8565             pthread_exit(NULL);
8566         }
8567 
8568         pthread_mutex_unlock(&clone_lock);
8569         preexit_cleanup(cpu_env, arg1);
8570         _exit(arg1);
8571         return 0; /* avoid warning */
8572     case TARGET_NR_read:
8573         if (arg2 == 0 && arg3 == 0) {
8574             return get_errno(safe_read(arg1, 0, 0));
8575         } else {
8576             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8577                 return -TARGET_EFAULT;
8578             ret = get_errno(safe_read(arg1, p, arg3));
8579             if (ret >= 0 &&
8580                 fd_trans_host_to_target_data(arg1)) {
8581                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8582             }
8583             unlock_user(p, arg2, ret);
8584         }
8585         return ret;
8586     case TARGET_NR_write:
8587         if (arg2 == 0 && arg3 == 0) {
8588             return get_errno(safe_write(arg1, 0, 0));
8589         }
8590         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8591             return -TARGET_EFAULT;
8592         if (fd_trans_target_to_host_data(arg1)) {
8593             void *copy = g_malloc(arg3);
8594             memcpy(copy, p, arg3);
8595             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8596             if (ret >= 0) {
8597                 ret = get_errno(safe_write(arg1, copy, ret));
8598             }
8599             g_free(copy);
8600         } else {
8601             ret = get_errno(safe_write(arg1, p, arg3));
8602         }
8603         unlock_user(p, arg2, 0);
8604         return ret;
8605 
8606 #ifdef TARGET_NR_open
8607     case TARGET_NR_open:
8608         if (!(p = lock_user_string(arg1)))
8609             return -TARGET_EFAULT;
8610         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8611                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8612                                   arg3));
8613         fd_trans_unregister(ret);
8614         unlock_user(p, arg1, 0);
8615         return ret;
8616 #endif
8617     case TARGET_NR_openat:
8618         if (!(p = lock_user_string(arg2)))
8619             return -TARGET_EFAULT;
8620         ret = get_errno(do_openat(cpu_env, arg1, p,
8621                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8622                                   arg4));
8623         fd_trans_unregister(ret);
8624         unlock_user(p, arg2, 0);
8625         return ret;
8626 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8627     case TARGET_NR_name_to_handle_at:
8628         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8629         return ret;
8630 #endif
8631 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8632     case TARGET_NR_open_by_handle_at:
8633         ret = do_open_by_handle_at(arg1, arg2, arg3);
8634         fd_trans_unregister(ret);
8635         return ret;
8636 #endif
8637     case TARGET_NR_close:
8638         fd_trans_unregister(arg1);
8639         return get_errno(close(arg1));
8640 
8641     case TARGET_NR_brk:
8642         return do_brk(arg1);
8643 #ifdef TARGET_NR_fork
8644     case TARGET_NR_fork:
8645         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8646 #endif
8647 #ifdef TARGET_NR_waitpid
8648     case TARGET_NR_waitpid:
8649         {
8650             int status;
8651             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8652             if (!is_error(ret) && arg2 && ret
8653                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8654                 return -TARGET_EFAULT;
8655         }
8656         return ret;
8657 #endif
8658 #ifdef TARGET_NR_waitid
8659     case TARGET_NR_waitid:
8660         {
8661             siginfo_t info;
8662             info.si_pid = 0;
8663             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8664             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8665                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8666                     return -TARGET_EFAULT;
8667                 host_to_target_siginfo(p, &info);
8668                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8669             }
8670         }
8671         return ret;
8672 #endif
8673 #ifdef TARGET_NR_creat /* not on alpha */
8674     case TARGET_NR_creat:
8675         if (!(p = lock_user_string(arg1)))
8676             return -TARGET_EFAULT;
8677         ret = get_errno(creat(p, arg2));
8678         fd_trans_unregister(ret);
8679         unlock_user(p, arg1, 0);
8680         return ret;
8681 #endif
8682 #ifdef TARGET_NR_link
8683     case TARGET_NR_link:
8684         {
8685             void * p2;
8686             p = lock_user_string(arg1);
8687             p2 = lock_user_string(arg2);
8688             if (!p || !p2)
8689                 ret = -TARGET_EFAULT;
8690             else
8691                 ret = get_errno(link(p, p2));
8692             unlock_user(p2, arg2, 0);
8693             unlock_user(p, arg1, 0);
8694         }
8695         return ret;
8696 #endif
8697 #if defined(TARGET_NR_linkat)
8698     case TARGET_NR_linkat:
8699         {
8700             void * p2 = NULL;
8701             if (!arg2 || !arg4)
8702                 return -TARGET_EFAULT;
8703             p  = lock_user_string(arg2);
8704             p2 = lock_user_string(arg4);
8705             if (!p || !p2)
8706                 ret = -TARGET_EFAULT;
8707             else
8708                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8709             unlock_user(p, arg2, 0);
8710             unlock_user(p2, arg4, 0);
8711         }
8712         return ret;
8713 #endif
8714 #ifdef TARGET_NR_unlink
8715     case TARGET_NR_unlink:
8716         if (!(p = lock_user_string(arg1)))
8717             return -TARGET_EFAULT;
8718         ret = get_errno(unlink(p));
8719         unlock_user(p, arg1, 0);
8720         return ret;
8721 #endif
8722 #if defined(TARGET_NR_unlinkat)
8723     case TARGET_NR_unlinkat:
8724         if (!(p = lock_user_string(arg2)))
8725             return -TARGET_EFAULT;
8726         ret = get_errno(unlinkat(arg1, p, arg3));
8727         unlock_user(p, arg2, 0);
8728         return ret;
8729 #endif
8730     case TARGET_NR_execve:
8731         {
8732             char **argp, **envp;
8733             int argc, envc;
8734             abi_ulong gp;
8735             abi_ulong guest_argp;
8736             abi_ulong guest_envp;
8737             abi_ulong addr;
8738             char **q;
8739 
8740             argc = 0;
8741             guest_argp = arg2;
8742             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8743                 if (get_user_ual(addr, gp))
8744                     return -TARGET_EFAULT;
8745                 if (!addr)
8746                     break;
8747                 argc++;
8748             }
8749             envc = 0;
8750             guest_envp = arg3;
8751             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8752                 if (get_user_ual(addr, gp))
8753                     return -TARGET_EFAULT;
8754                 if (!addr)
8755                     break;
8756                 envc++;
8757             }
8758 
8759             argp = g_new0(char *, argc + 1);
8760             envp = g_new0(char *, envc + 1);
8761 
8762             for (gp = guest_argp, q = argp; gp;
8763                   gp += sizeof(abi_ulong), q++) {
8764                 if (get_user_ual(addr, gp))
8765                     goto execve_efault;
8766                 if (!addr)
8767                     break;
8768                 if (!(*q = lock_user_string(addr)))
8769                     goto execve_efault;
8770             }
8771             *q = NULL;
8772 
8773             for (gp = guest_envp, q = envp; gp;
8774                   gp += sizeof(abi_ulong), q++) {
8775                 if (get_user_ual(addr, gp))
8776                     goto execve_efault;
8777                 if (!addr)
8778                     break;
8779                 if (!(*q = lock_user_string(addr)))
8780                     goto execve_efault;
8781             }
8782             *q = NULL;
8783 
8784             if (!(p = lock_user_string(arg1)))
8785                 goto execve_efault;
8786             /* Although execve() is not an interruptible syscall it is
8787              * a special case where we must use the safe_syscall wrapper:
8788              * if we allow a signal to happen before we make the host
8789              * syscall then we will 'lose' it, because at the point of
8790              * execve the process leaves QEMU's control. So we use the
8791              * safe syscall wrapper to ensure that we either take the
8792              * signal as a guest signal, or else it does not happen
8793              * before the execve completes and makes it the other
8794              * program's problem.
8795              */
8796             ret = get_errno(safe_execve(p, argp, envp));
8797             unlock_user(p, arg1, 0);
8798 
8799             goto execve_end;
8800 
8801         execve_efault:
8802             ret = -TARGET_EFAULT;
8803 
8804         execve_end:
8805             for (gp = guest_argp, q = argp; *q;
8806                   gp += sizeof(abi_ulong), q++) {
8807                 if (get_user_ual(addr, gp)
8808                     || !addr)
8809                     break;
8810                 unlock_user(*q, addr, 0);
8811             }
8812             for (gp = guest_envp, q = envp; *q;
8813                   gp += sizeof(abi_ulong), q++) {
8814                 if (get_user_ual(addr, gp)
8815                     || !addr)
8816                     break;
8817                 unlock_user(*q, addr, 0);
8818             }
8819 
8820             g_free(argp);
8821             g_free(envp);
8822         }
8823         return ret;
8824     case TARGET_NR_chdir:
8825         if (!(p = lock_user_string(arg1)))
8826             return -TARGET_EFAULT;
8827         ret = get_errno(chdir(p));
8828         unlock_user(p, arg1, 0);
8829         return ret;
8830 #ifdef TARGET_NR_time
8831     case TARGET_NR_time:
8832         {
8833             time_t host_time;
8834             ret = get_errno(time(&host_time));
8835             if (!is_error(ret)
8836                 && arg1
8837                 && put_user_sal(host_time, arg1))
8838                 return -TARGET_EFAULT;
8839         }
8840         return ret;
8841 #endif
8842 #ifdef TARGET_NR_mknod
8843     case TARGET_NR_mknod:
8844         if (!(p = lock_user_string(arg1)))
8845             return -TARGET_EFAULT;
8846         ret = get_errno(mknod(p, arg2, arg3));
8847         unlock_user(p, arg1, 0);
8848         return ret;
8849 #endif
8850 #if defined(TARGET_NR_mknodat)
8851     case TARGET_NR_mknodat:
8852         if (!(p = lock_user_string(arg2)))
8853             return -TARGET_EFAULT;
8854         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8855         unlock_user(p, arg2, 0);
8856         return ret;
8857 #endif
8858 #ifdef TARGET_NR_chmod
8859     case TARGET_NR_chmod:
8860         if (!(p = lock_user_string(arg1)))
8861             return -TARGET_EFAULT;
8862         ret = get_errno(chmod(p, arg2));
8863         unlock_user(p, arg1, 0);
8864         return ret;
8865 #endif
8866 #ifdef TARGET_NR_lseek
8867     case TARGET_NR_lseek:
8868         return get_errno(lseek(arg1, arg2, arg3));
8869 #endif
8870 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8871     /* Alpha specific */
8872     case TARGET_NR_getxpid:
8873         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8874         return get_errno(getpid());
8875 #endif
8876 #ifdef TARGET_NR_getpid
8877     case TARGET_NR_getpid:
8878         return get_errno(getpid());
8879 #endif
8880     case TARGET_NR_mount:
8881         {
8882             /* need to look at the data field */
8883             void *p2, *p3;
8884 
8885             if (arg1) {
8886                 p = lock_user_string(arg1);
8887                 if (!p) {
8888                     return -TARGET_EFAULT;
8889                 }
8890             } else {
8891                 p = NULL;
8892             }
8893 
8894             p2 = lock_user_string(arg2);
8895             if (!p2) {
8896                 if (arg1) {
8897                     unlock_user(p, arg1, 0);
8898                 }
8899                 return -TARGET_EFAULT;
8900             }
8901 
8902             if (arg3) {
8903                 p3 = lock_user_string(arg3);
8904                 if (!p3) {
8905                     if (arg1) {
8906                         unlock_user(p, arg1, 0);
8907                     }
8908                     unlock_user(p2, arg2, 0);
8909                     return -TARGET_EFAULT;
8910                 }
8911             } else {
8912                 p3 = NULL;
8913             }
8914 
8915             /* FIXME - arg5 should be locked, but it isn't clear how to
8916              * do that since it's not guaranteed to be a NULL-terminated
8917              * string.
8918              */
8919             if (!arg5) {
8920                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8921             } else {
8922                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8923             }
8924             ret = get_errno(ret);
8925 
8926             if (arg1) {
8927                 unlock_user(p, arg1, 0);
8928             }
8929             unlock_user(p2, arg2, 0);
8930             if (arg3) {
8931                 unlock_user(p3, arg3, 0);
8932             }
8933         }
8934         return ret;
8935 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8936 #if defined(TARGET_NR_umount)
8937     case TARGET_NR_umount:
8938 #endif
8939 #if defined(TARGET_NR_oldumount)
8940     case TARGET_NR_oldumount:
8941 #endif
8942         if (!(p = lock_user_string(arg1)))
8943             return -TARGET_EFAULT;
8944         ret = get_errno(umount(p));
8945         unlock_user(p, arg1, 0);
8946         return ret;
8947 #endif
8948 #ifdef TARGET_NR_stime /* not on alpha */
8949     case TARGET_NR_stime:
8950         {
8951             struct timespec ts;
8952             ts.tv_nsec = 0;
8953             if (get_user_sal(ts.tv_sec, arg1)) {
8954                 return -TARGET_EFAULT;
8955             }
8956             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8957         }
8958 #endif
8959 #ifdef TARGET_NR_alarm /* not on alpha */
8960     case TARGET_NR_alarm:
8961         return alarm(arg1);
8962 #endif
8963 #ifdef TARGET_NR_pause /* not on alpha */
8964     case TARGET_NR_pause:
8965         if (!block_signals()) {
8966             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8967         }
8968         return -TARGET_EINTR;
8969 #endif
8970 #ifdef TARGET_NR_utime
8971     case TARGET_NR_utime:
8972         {
8973             struct utimbuf tbuf, *host_tbuf;
8974             struct target_utimbuf *target_tbuf;
8975             if (arg2) {
8976                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8977                     return -TARGET_EFAULT;
8978                 tbuf.actime = tswapal(target_tbuf->actime);
8979                 tbuf.modtime = tswapal(target_tbuf->modtime);
8980                 unlock_user_struct(target_tbuf, arg2, 0);
8981                 host_tbuf = &tbuf;
8982             } else {
8983                 host_tbuf = NULL;
8984             }
8985             if (!(p = lock_user_string(arg1)))
8986                 return -TARGET_EFAULT;
8987             ret = get_errno(utime(p, host_tbuf));
8988             unlock_user(p, arg1, 0);
8989         }
8990         return ret;
8991 #endif
8992 #ifdef TARGET_NR_utimes
8993     case TARGET_NR_utimes:
8994         {
8995             struct timeval *tvp, tv[2];
8996             if (arg2) {
8997                 if (copy_from_user_timeval(&tv[0], arg2)
8998                     || copy_from_user_timeval(&tv[1],
8999                                               arg2 + sizeof(struct target_timeval)))
9000                     return -TARGET_EFAULT;
9001                 tvp = tv;
9002             } else {
9003                 tvp = NULL;
9004             }
9005             if (!(p = lock_user_string(arg1)))
9006                 return -TARGET_EFAULT;
9007             ret = get_errno(utimes(p, tvp));
9008             unlock_user(p, arg1, 0);
9009         }
9010         return ret;
9011 #endif
9012 #if defined(TARGET_NR_futimesat)
9013     case TARGET_NR_futimesat:
9014         {
9015             struct timeval *tvp, tv[2];
9016             if (arg3) {
9017                 if (copy_from_user_timeval(&tv[0], arg3)
9018                     || copy_from_user_timeval(&tv[1],
9019                                               arg3 + sizeof(struct target_timeval)))
9020                     return -TARGET_EFAULT;
9021                 tvp = tv;
9022             } else {
9023                 tvp = NULL;
9024             }
9025             if (!(p = lock_user_string(arg2))) {
9026                 return -TARGET_EFAULT;
9027             }
9028             ret = get_errno(futimesat(arg1, path(p), tvp));
9029             unlock_user(p, arg2, 0);
9030         }
9031         return ret;
9032 #endif
9033 #ifdef TARGET_NR_access
9034     case TARGET_NR_access:
9035         if (!(p = lock_user_string(arg1))) {
9036             return -TARGET_EFAULT;
9037         }
9038         ret = get_errno(access(path(p), arg2));
9039         unlock_user(p, arg1, 0);
9040         return ret;
9041 #endif
9042 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9043     case TARGET_NR_faccessat:
9044         if (!(p = lock_user_string(arg2))) {
9045             return -TARGET_EFAULT;
9046         }
9047         ret = get_errno(faccessat(arg1, p, arg3, 0));
9048         unlock_user(p, arg2, 0);
9049         return ret;
9050 #endif
9051 #ifdef TARGET_NR_nice /* not on alpha */
9052     case TARGET_NR_nice:
9053         return get_errno(nice(arg1));
9054 #endif
9055     case TARGET_NR_sync:
9056         sync();
9057         return 0;
9058 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9059     case TARGET_NR_syncfs:
9060         return get_errno(syncfs(arg1));
9061 #endif
9062     case TARGET_NR_kill:
9063         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9064 #ifdef TARGET_NR_rename
9065     case TARGET_NR_rename:
9066         {
9067             void *p2;
9068             p = lock_user_string(arg1);
9069             p2 = lock_user_string(arg2);
9070             if (!p || !p2)
9071                 ret = -TARGET_EFAULT;
9072             else
9073                 ret = get_errno(rename(p, p2));
9074             unlock_user(p2, arg2, 0);
9075             unlock_user(p, arg1, 0);
9076         }
9077         return ret;
9078 #endif
9079 #if defined(TARGET_NR_renameat)
9080     case TARGET_NR_renameat:
9081         {
9082             void *p2;
9083             p  = lock_user_string(arg2);
9084             p2 = lock_user_string(arg4);
9085             if (!p || !p2)
9086                 ret = -TARGET_EFAULT;
9087             else
9088                 ret = get_errno(renameat(arg1, p, arg3, p2));
9089             unlock_user(p2, arg4, 0);
9090             unlock_user(p, arg2, 0);
9091         }
9092         return ret;
9093 #endif
9094 #if defined(TARGET_NR_renameat2)
9095     case TARGET_NR_renameat2:
9096         {
9097             void *p2;
9098             p  = lock_user_string(arg2);
9099             p2 = lock_user_string(arg4);
9100             if (!p || !p2) {
9101                 ret = -TARGET_EFAULT;
9102             } else {
9103                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9104             }
9105             unlock_user(p2, arg4, 0);
9106             unlock_user(p, arg2, 0);
9107         }
9108         return ret;
9109 #endif
9110 #ifdef TARGET_NR_mkdir
9111     case TARGET_NR_mkdir:
9112         if (!(p = lock_user_string(arg1)))
9113             return -TARGET_EFAULT;
9114         ret = get_errno(mkdir(p, arg2));
9115         unlock_user(p, arg1, 0);
9116         return ret;
9117 #endif
9118 #if defined(TARGET_NR_mkdirat)
9119     case TARGET_NR_mkdirat:
9120         if (!(p = lock_user_string(arg2)))
9121             return -TARGET_EFAULT;
9122         ret = get_errno(mkdirat(arg1, p, arg3));
9123         unlock_user(p, arg2, 0);
9124         return ret;
9125 #endif
9126 #ifdef TARGET_NR_rmdir
9127     case TARGET_NR_rmdir:
9128         if (!(p = lock_user_string(arg1)))
9129             return -TARGET_EFAULT;
9130         ret = get_errno(rmdir(p));
9131         unlock_user(p, arg1, 0);
9132         return ret;
9133 #endif
9134     case TARGET_NR_dup:
9135         ret = get_errno(dup(arg1));
9136         if (ret >= 0) {
9137             fd_trans_dup(arg1, ret);
9138         }
9139         return ret;
9140 #ifdef TARGET_NR_pipe
9141     case TARGET_NR_pipe:
9142         return do_pipe(cpu_env, arg1, 0, 0);
9143 #endif
9144 #ifdef TARGET_NR_pipe2
9145     case TARGET_NR_pipe2:
9146         return do_pipe(cpu_env, arg1,
9147                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9148 #endif
9149     case TARGET_NR_times:
9150         {
9151             struct target_tms *tmsp;
9152             struct tms tms;
9153             ret = get_errno(times(&tms));
9154             if (arg1) {
9155                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9156                 if (!tmsp)
9157                     return -TARGET_EFAULT;
9158                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9159                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9160                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9161                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9162             }
9163             if (!is_error(ret))
9164                 ret = host_to_target_clock_t(ret);
9165         }
9166         return ret;
9167     case TARGET_NR_acct:
9168         if (arg1 == 0) {
9169             ret = get_errno(acct(NULL));
9170         } else {
9171             if (!(p = lock_user_string(arg1))) {
9172                 return -TARGET_EFAULT;
9173             }
9174             ret = get_errno(acct(path(p)));
9175             unlock_user(p, arg1, 0);
9176         }
9177         return ret;
9178 #ifdef TARGET_NR_umount2
9179     case TARGET_NR_umount2:
9180         if (!(p = lock_user_string(arg1)))
9181             return -TARGET_EFAULT;
9182         ret = get_errno(umount2(p, arg2));
9183         unlock_user(p, arg1, 0);
9184         return ret;
9185 #endif
9186     case TARGET_NR_ioctl:
9187         return do_ioctl(arg1, arg2, arg3);
9188 #ifdef TARGET_NR_fcntl
9189     case TARGET_NR_fcntl:
9190         return do_fcntl(arg1, arg2, arg3);
9191 #endif
9192     case TARGET_NR_setpgid:
9193         return get_errno(setpgid(arg1, arg2));
9194     case TARGET_NR_umask:
9195         return get_errno(umask(arg1));
9196     case TARGET_NR_chroot:
9197         if (!(p = lock_user_string(arg1)))
9198             return -TARGET_EFAULT;
9199         ret = get_errno(chroot(p));
9200         unlock_user(p, arg1, 0);
9201         return ret;
9202 #ifdef TARGET_NR_dup2
9203     case TARGET_NR_dup2:
9204         ret = get_errno(dup2(arg1, arg2));
9205         if (ret >= 0) {
9206             fd_trans_dup(arg1, arg2);
9207         }
9208         return ret;
9209 #endif
9210 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9211     case TARGET_NR_dup3:
9212     {
9213         int host_flags;
9214 
9215         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9216             return -TARGET_EINVAL;
9217         }
9218         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9219         ret = get_errno(dup3(arg1, arg2, host_flags));
9220         if (ret >= 0) {
9221             fd_trans_dup(arg1, arg2);
9222         }
9223         return ret;
9224     }
9225 #endif
9226 #ifdef TARGET_NR_getppid /* not on alpha */
9227     case TARGET_NR_getppid:
9228         return get_errno(getppid());
9229 #endif
9230 #ifdef TARGET_NR_getpgrp
9231     case TARGET_NR_getpgrp:
9232         return get_errno(getpgrp());
9233 #endif
9234     case TARGET_NR_setsid:
9235         return get_errno(setsid());
9236 #ifdef TARGET_NR_sigaction
9237     case TARGET_NR_sigaction:
9238         {
9239 #if defined(TARGET_MIPS)
9240             struct target_sigaction act, oact, *pact, *old_act;
9241 
9242             if (arg2) {
9243                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9244                     return -TARGET_EFAULT;
9245                 act._sa_handler = old_act->_sa_handler;
9246                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9247                 act.sa_flags = old_act->sa_flags;
9248                 unlock_user_struct(old_act, arg2, 0);
9249                 pact = &act;
9250             } else {
9251                 pact = NULL;
9252             }
9253 
9254             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9255 
9256             if (!is_error(ret) && arg3) {
9257                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9258                     return -TARGET_EFAULT;
9259                 old_act->_sa_handler = oact._sa_handler;
9260                 old_act->sa_flags = oact.sa_flags;
9261                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9262                 old_act->sa_mask.sig[1] = 0;
9263                 old_act->sa_mask.sig[2] = 0;
9264                 old_act->sa_mask.sig[3] = 0;
9265                 unlock_user_struct(old_act, arg3, 1);
9266             }
9267 #else
9268             struct target_old_sigaction *old_act;
9269             struct target_sigaction act, oact, *pact;
9270             if (arg2) {
9271                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9272                     return -TARGET_EFAULT;
9273                 act._sa_handler = old_act->_sa_handler;
9274                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9275                 act.sa_flags = old_act->sa_flags;
9276 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9277                 act.sa_restorer = old_act->sa_restorer;
9278 #endif
9279                 unlock_user_struct(old_act, arg2, 0);
9280                 pact = &act;
9281             } else {
9282                 pact = NULL;
9283             }
9284             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9285             if (!is_error(ret) && arg3) {
9286                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9287                     return -TARGET_EFAULT;
9288                 old_act->_sa_handler = oact._sa_handler;
9289                 old_act->sa_mask = oact.sa_mask.sig[0];
9290                 old_act->sa_flags = oact.sa_flags;
9291 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9292                 old_act->sa_restorer = oact.sa_restorer;
9293 #endif
9294                 unlock_user_struct(old_act, arg3, 1);
9295             }
9296 #endif
9297         }
9298         return ret;
9299 #endif
9300     case TARGET_NR_rt_sigaction:
9301         {
9302             /*
9303              * For Alpha and SPARC this is a 5 argument syscall, with
9304              * a 'restorer' parameter which must be copied into the
9305              * sa_restorer field of the sigaction struct.
9306              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9307              * and arg5 is the sigsetsize.
9308              */
9309 #if defined(TARGET_ALPHA)
9310             target_ulong sigsetsize = arg4;
9311             target_ulong restorer = arg5;
9312 #elif defined(TARGET_SPARC)
9313             target_ulong restorer = arg4;
9314             target_ulong sigsetsize = arg5;
9315 #else
9316             target_ulong sigsetsize = arg4;
9317             target_ulong restorer = 0;
9318 #endif
9319             struct target_sigaction *act = NULL;
9320             struct target_sigaction *oact = NULL;
9321 
9322             if (sigsetsize != sizeof(target_sigset_t)) {
9323                 return -TARGET_EINVAL;
9324             }
9325             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9326                 return -TARGET_EFAULT;
9327             }
9328             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9329                 ret = -TARGET_EFAULT;
9330             } else {
9331                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9332                 if (oact) {
9333                     unlock_user_struct(oact, arg3, 1);
9334                 }
9335             }
9336             if (act) {
9337                 unlock_user_struct(act, arg2, 0);
9338             }
9339         }
9340         return ret;
9341 #ifdef TARGET_NR_sgetmask /* not on alpha */
9342     case TARGET_NR_sgetmask:
9343         {
9344             sigset_t cur_set;
9345             abi_ulong target_set;
9346             ret = do_sigprocmask(0, NULL, &cur_set);
9347             if (!ret) {
9348                 host_to_target_old_sigset(&target_set, &cur_set);
9349                 ret = target_set;
9350             }
9351         }
9352         return ret;
9353 #endif
9354 #ifdef TARGET_NR_ssetmask /* not on alpha */
9355     case TARGET_NR_ssetmask:
9356         {
9357             sigset_t set, oset;
9358             abi_ulong target_set = arg1;
9359             target_to_host_old_sigset(&set, &target_set);
9360             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9361             if (!ret) {
9362                 host_to_target_old_sigset(&target_set, &oset);
9363                 ret = target_set;
9364             }
9365         }
9366         return ret;
9367 #endif
9368 #ifdef TARGET_NR_sigprocmask
9369     case TARGET_NR_sigprocmask:
9370         {
9371 #if defined(TARGET_ALPHA)
9372             sigset_t set, oldset;
9373             abi_ulong mask;
9374             int how;
9375 
9376             switch (arg1) {
9377             case TARGET_SIG_BLOCK:
9378                 how = SIG_BLOCK;
9379                 break;
9380             case TARGET_SIG_UNBLOCK:
9381                 how = SIG_UNBLOCK;
9382                 break;
9383             case TARGET_SIG_SETMASK:
9384                 how = SIG_SETMASK;
9385                 break;
9386             default:
9387                 return -TARGET_EINVAL;
9388             }
9389             mask = arg2;
9390             target_to_host_old_sigset(&set, &mask);
9391 
9392             ret = do_sigprocmask(how, &set, &oldset);
9393             if (!is_error(ret)) {
9394                 host_to_target_old_sigset(&mask, &oldset);
9395                 ret = mask;
9396                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9397             }
9398 #else
9399             sigset_t set, oldset, *set_ptr;
9400             int how;
9401 
9402             if (arg2) {
9403                 switch (arg1) {
9404                 case TARGET_SIG_BLOCK:
9405                     how = SIG_BLOCK;
9406                     break;
9407                 case TARGET_SIG_UNBLOCK:
9408                     how = SIG_UNBLOCK;
9409                     break;
9410                 case TARGET_SIG_SETMASK:
9411                     how = SIG_SETMASK;
9412                     break;
9413                 default:
9414                     return -TARGET_EINVAL;
9415                 }
9416                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9417                     return -TARGET_EFAULT;
9418                 target_to_host_old_sigset(&set, p);
9419                 unlock_user(p, arg2, 0);
9420                 set_ptr = &set;
9421             } else {
9422                 how = 0;
9423                 set_ptr = NULL;
9424             }
9425             ret = do_sigprocmask(how, set_ptr, &oldset);
9426             if (!is_error(ret) && arg3) {
9427                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9428                     return -TARGET_EFAULT;
9429                 host_to_target_old_sigset(p, &oldset);
9430                 unlock_user(p, arg3, sizeof(target_sigset_t));
9431             }
9432 #endif
9433         }
9434         return ret;
9435 #endif
9436     case TARGET_NR_rt_sigprocmask:
9437         {
9438             int how = arg1;
9439             sigset_t set, oldset, *set_ptr;
9440 
9441             if (arg4 != sizeof(target_sigset_t)) {
9442                 return -TARGET_EINVAL;
9443             }
9444 
9445             if (arg2) {
9446                 switch(how) {
9447                 case TARGET_SIG_BLOCK:
9448                     how = SIG_BLOCK;
9449                     break;
9450                 case TARGET_SIG_UNBLOCK:
9451                     how = SIG_UNBLOCK;
9452                     break;
9453                 case TARGET_SIG_SETMASK:
9454                     how = SIG_SETMASK;
9455                     break;
9456                 default:
9457                     return -TARGET_EINVAL;
9458                 }
9459                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9460                     return -TARGET_EFAULT;
9461                 target_to_host_sigset(&set, p);
9462                 unlock_user(p, arg2, 0);
9463                 set_ptr = &set;
9464             } else {
9465                 how = 0;
9466                 set_ptr = NULL;
9467             }
9468             ret = do_sigprocmask(how, set_ptr, &oldset);
9469             if (!is_error(ret) && arg3) {
9470                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9471                     return -TARGET_EFAULT;
9472                 host_to_target_sigset(p, &oldset);
9473                 unlock_user(p, arg3, sizeof(target_sigset_t));
9474             }
9475         }
9476         return ret;
9477 #ifdef TARGET_NR_sigpending
9478     case TARGET_NR_sigpending:
9479         {
9480             sigset_t set;
9481             ret = get_errno(sigpending(&set));
9482             if (!is_error(ret)) {
9483                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9484                     return -TARGET_EFAULT;
9485                 host_to_target_old_sigset(p, &set);
9486                 unlock_user(p, arg1, sizeof(target_sigset_t));
9487             }
9488         }
9489         return ret;
9490 #endif
9491     case TARGET_NR_rt_sigpending:
9492         {
9493             sigset_t set;
9494 
9495             /* Yes, this check is >, not != like most. We follow the kernel's
9496              * logic and it does it like this because it implements
9497              * NR_sigpending through the same code path, and in that case
9498              * the old_sigset_t is smaller in size.
9499              */
9500             if (arg2 > sizeof(target_sigset_t)) {
9501                 return -TARGET_EINVAL;
9502             }
9503 
9504             ret = get_errno(sigpending(&set));
9505             if (!is_error(ret)) {
9506                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9507                     return -TARGET_EFAULT;
9508                 host_to_target_sigset(p, &set);
9509                 unlock_user(p, arg1, sizeof(target_sigset_t));
9510             }
9511         }
9512         return ret;
9513 #ifdef TARGET_NR_sigsuspend
9514     case TARGET_NR_sigsuspend:
9515         {
9516             TaskState *ts = cpu->opaque;
9517 #if defined(TARGET_ALPHA)
9518             abi_ulong mask = arg1;
9519             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9520 #else
9521             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9522                 return -TARGET_EFAULT;
9523             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9524             unlock_user(p, arg1, 0);
9525 #endif
9526             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9527                                                SIGSET_T_SIZE));
9528             if (ret != -QEMU_ERESTARTSYS) {
9529                 ts->in_sigsuspend = 1;
9530             }
9531         }
9532         return ret;
9533 #endif
9534     case TARGET_NR_rt_sigsuspend:
9535         {
9536             TaskState *ts = cpu->opaque;
9537 
9538             if (arg2 != sizeof(target_sigset_t)) {
9539                 return -TARGET_EINVAL;
9540             }
9541             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9542                 return -TARGET_EFAULT;
9543             target_to_host_sigset(&ts->sigsuspend_mask, p);
9544             unlock_user(p, arg1, 0);
9545             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9546                                                SIGSET_T_SIZE));
9547             if (ret != -QEMU_ERESTARTSYS) {
9548                 ts->in_sigsuspend = 1;
9549             }
9550         }
9551         return ret;
9552 #ifdef TARGET_NR_rt_sigtimedwait
9553     case TARGET_NR_rt_sigtimedwait:
9554         {
9555             sigset_t set;
9556             struct timespec uts, *puts;
9557             siginfo_t uinfo;
9558 
9559             if (arg4 != sizeof(target_sigset_t)) {
9560                 return -TARGET_EINVAL;
9561             }
9562 
9563             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9564                 return -TARGET_EFAULT;
9565             target_to_host_sigset(&set, p);
9566             unlock_user(p, arg1, 0);
9567             if (arg3) {
9568                 puts = &uts;
9569                 if (target_to_host_timespec(puts, arg3)) {
9570                     return -TARGET_EFAULT;
9571                 }
9572             } else {
9573                 puts = NULL;
9574             }
9575             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9576                                                  SIGSET_T_SIZE));
9577             if (!is_error(ret)) {
9578                 if (arg2) {
9579                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9580                                   0);
9581                     if (!p) {
9582                         return -TARGET_EFAULT;
9583                     }
9584                     host_to_target_siginfo(p, &uinfo);
9585                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9586                 }
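                     /* The return value is the host signal number that was
                        received; map it back to the guest numbering.  */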
9587                 ret = host_to_target_signal(ret);
9588             }
9589         }
9590         return ret;
9591 #endif
9592 #ifdef TARGET_NR_rt_sigtimedwait_time64
9593     case TARGET_NR_rt_sigtimedwait_time64:
9594         {
9595             sigset_t set;
9596             struct timespec uts, *puts;
9597             siginfo_t uinfo;
9598 
9599             if (arg4 != sizeof(target_sigset_t)) {
9600                 return -TARGET_EINVAL;
9601             }
9602 
9603             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9604             if (!p) {
9605                 return -TARGET_EFAULT;
9606             }
9607             target_to_host_sigset(&set, p);
9608             unlock_user(p, arg1, 0);
9609             if (arg3) {
9610                 puts = &uts;
9611                 if (target_to_host_timespec64(puts, arg3)) {
9612                     return -TARGET_EFAULT;
9613                 }
9614             } else {
9615                 puts = NULL;
9616             }
9617             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9618                                                  SIGSET_T_SIZE));
9619             if (!is_error(ret)) {
9620                 if (arg2) {
9621                     p = lock_user(VERIFY_WRITE, arg2,
9622                                   sizeof(target_siginfo_t), 0);
9623                     if (!p) {
9624                         return -TARGET_EFAULT;
9625                     }
9626                     host_to_target_siginfo(p, &uinfo);
9627                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9628                 }
9629                 ret = host_to_target_signal(ret);
9630             }
9631         }
9632         return ret;
9633 #endif
9634     case TARGET_NR_rt_sigqueueinfo:
9635         {
9636             siginfo_t uinfo;
9637 
9638             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9639             if (!p) {
9640                 return -TARGET_EFAULT;
9641             }
9642             target_to_host_siginfo(&uinfo, p);
9643             unlock_user(p, arg3, 0);
9644             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9645         }
9646         return ret;
9647     case TARGET_NR_rt_tgsigqueueinfo:
9648         {
9649             siginfo_t uinfo;
9650 
9651             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9652             if (!p) {
9653                 return -TARGET_EFAULT;
9654             }
9655             target_to_host_siginfo(&uinfo, p);
9656             unlock_user(p, arg4, 0);
9657             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9658         }
9659         return ret;
9660 #ifdef TARGET_NR_sigreturn
9661     case TARGET_NR_sigreturn:
9662         if (block_signals()) {
9663             return -QEMU_ERESTARTSYS;
9664         }
9665         return do_sigreturn(cpu_env);
9666 #endif
9667     case TARGET_NR_rt_sigreturn:
9668         if (block_signals()) {
9669             return -QEMU_ERESTARTSYS;
9670         }
9671         return do_rt_sigreturn(cpu_env);
9672     case TARGET_NR_sethostname:
9673         if (!(p = lock_user_string(arg1)))
9674             return -TARGET_EFAULT;
9675         ret = get_errno(sethostname(p, arg2));
9676         unlock_user(p, arg1, 0);
9677         return ret;
9678 #ifdef TARGET_NR_setrlimit
9679     case TARGET_NR_setrlimit:
9680         {
9681             int resource = target_to_host_resource(arg1);
9682             struct target_rlimit *target_rlim;
9683             struct rlimit rlim;
9684             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9685                 return -TARGET_EFAULT;
9686             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9687             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9688             unlock_user_struct(target_rlim, arg2, 0);
9689             /*
9690              * If we just passed through resource limit settings for memory then
9691              * they would also apply to QEMU's own allocations, and QEMU will
9692              * crash or hang or die if its allocations fail. Ideally we would
9693              * track the guest allocations in QEMU and apply the limits ourselves.
9694              * For now, just tell the guest the call succeeded but don't actually
9695              * limit anything.
9696              */
9697             if (resource != RLIMIT_AS &&
9698                 resource != RLIMIT_DATA &&
9699                 resource != RLIMIT_STACK) {
9700                 return get_errno(setrlimit(resource, &rlim));
9701             } else {
9702                 return 0;
9703             }
9704         }
9705 #endif
9706 #ifdef TARGET_NR_getrlimit
9707     case TARGET_NR_getrlimit:
9708         {
9709             int resource = target_to_host_resource(arg1);
9710             struct target_rlimit *target_rlim;
9711             struct rlimit rlim;
9712 
9713             ret = get_errno(getrlimit(resource, &rlim));
9714             if (!is_error(ret)) {
9715                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9716                     return -TARGET_EFAULT;
9717                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9718                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9719                 unlock_user_struct(target_rlim, arg2, 1);
9720             }
9721         }
9722         return ret;
9723 #endif
9724     case TARGET_NR_getrusage:
9725         {
9726             struct rusage rusage;
9727             ret = get_errno(getrusage(arg1, &rusage));
9728             if (!is_error(ret)) {
9729                 ret = host_to_target_rusage(arg2, &rusage);
9730             }
9731         }
9732         return ret;
9733 #if defined(TARGET_NR_gettimeofday)
9734     case TARGET_NR_gettimeofday:
9735         {
9736             struct timeval tv;
9737             struct timezone tz;
9738 
9739             ret = get_errno(gettimeofday(&tv, &tz));
9740             if (!is_error(ret)) {
9741                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9742                     return -TARGET_EFAULT;
9743                 }
9744                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9745                     return -TARGET_EFAULT;
9746                 }
9747             }
9748         }
9749         return ret;
9750 #endif
9751 #if defined(TARGET_NR_settimeofday)
9752     case TARGET_NR_settimeofday:
9753         {
9754             struct timeval tv, *ptv = NULL;
9755             struct timezone tz, *ptz = NULL;
9756 
9757             if (arg1) {
9758                 if (copy_from_user_timeval(&tv, arg1)) {
9759                     return -TARGET_EFAULT;
9760                 }
9761                 ptv = &tv;
9762             }
9763 
9764             if (arg2) {
9765                 if (copy_from_user_timezone(&tz, arg2)) {
9766                     return -TARGET_EFAULT;
9767                 }
9768                 ptz = &tz;
9769             }
9770 
9771             return get_errno(settimeofday(ptv, ptz));
9772         }
9773 #endif
9774 #if defined(TARGET_NR_select)
9775     case TARGET_NR_select:
9776 #if defined(TARGET_WANT_NI_OLD_SELECT)
9777         /* Some architectures used to have old_select here,
9778          * but they now return ENOSYS for it.
9779          */
9780         ret = -TARGET_ENOSYS;
9781 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9782         ret = do_old_select(arg1);
9783 #else
9784         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9785 #endif
9786         return ret;
9787 #endif
9788 #ifdef TARGET_NR_pselect6
9789     case TARGET_NR_pselect6:
9790         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9791 #endif
9792 #ifdef TARGET_NR_pselect6_time64
9793     case TARGET_NR_pselect6_time64:
9794         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9795 #endif
9796 #ifdef TARGET_NR_symlink
9797     case TARGET_NR_symlink:
9798         {
9799             void *p2;
9800             p = lock_user_string(arg1);
9801             p2 = lock_user_string(arg2);
9802             if (!p || !p2)
9803                 ret = -TARGET_EFAULT;
9804             else
9805                 ret = get_errno(symlink(p, p2));
9806             unlock_user(p2, arg2, 0);
9807             unlock_user(p, arg1, 0);
9808         }
9809         return ret;
9810 #endif
9811 #if defined(TARGET_NR_symlinkat)
9812     case TARGET_NR_symlinkat:
9813         {
9814             void *p2;
9815             p  = lock_user_string(arg1);
9816             p2 = lock_user_string(arg3);
9817             if (!p || !p2)
9818                 ret = -TARGET_EFAULT;
9819             else
9820                 ret = get_errno(symlinkat(p, arg2, p2));
9821             unlock_user(p2, arg3, 0);
9822             unlock_user(p, arg1, 0);
9823         }
9824         return ret;
9825 #endif
9826 #ifdef TARGET_NR_readlink
9827     case TARGET_NR_readlink:
9828         {
9829             void *p2;
9830             p = lock_user_string(arg1);
9831             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9832             if (!p || !p2) {
9833                 ret = -TARGET_EFAULT;
9834             } else if (!arg3) {
9835                 /* Short circuit this for the magic exe check. */
9836                 ret = -TARGET_EINVAL;
9837             } else if (is_proc_myself((const char *)p, "exe")) {
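                     /* readlink on /proc/self/exe must report the emulated
                        guest binary, not the QEMU executable itself.  */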
9838                 char real[PATH_MAX], *temp;
9839                 temp = realpath(exec_path, real);
9840                 /* Return value is # of bytes that we wrote to the buffer. */
9841                 if (temp == NULL) {
9842                     ret = get_errno(-1);
9843                 } else {
9844                     /* Don't worry about sign mismatch as earlier mapping
9845                      * logic would have thrown a bad address error. */
9846                     ret = MIN(strlen(real), arg3);
9847                     /* We cannot NUL terminate the string. */
9848                     memcpy(p2, real, ret);
9849                 }
9850             } else {
9851                 ret = get_errno(readlink(path(p), p2, arg3));
9852             }
9853             unlock_user(p2, arg2, ret);
9854             unlock_user(p, arg1, 0);
9855         }
9856         return ret;
9857 #endif
9858 #if defined(TARGET_NR_readlinkat)
9859     case TARGET_NR_readlinkat:
9860         {
9861             void *p2;
9862             p  = lock_user_string(arg2);
9863             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9864             if (!p || !p2) {
9865                 ret = -TARGET_EFAULT;
9866             } else if (is_proc_myself((const char *)p, "exe")) {
9867                 char real[PATH_MAX], *temp;
9868                 temp = realpath(exec_path, real);
9869                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9870                 snprintf((char *)p2, arg4, "%s", real);
9871             } else {
9872                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9873             }
9874             unlock_user(p2, arg3, ret);
9875             unlock_user(p, arg2, 0);
9876         }
9877         return ret;
9878 #endif
9879 #ifdef TARGET_NR_swapon
9880     case TARGET_NR_swapon:
9881         if (!(p = lock_user_string(arg1)))
9882             return -TARGET_EFAULT;
9883         ret = get_errno(swapon(p, arg2));
9884         unlock_user(p, arg1, 0);
9885         return ret;
9886 #endif
9887     case TARGET_NR_reboot:
9888         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9889            /* arg4 is only valid for LINUX_REBOOT_CMD_RESTART2 */
9890            p = lock_user_string(arg4);
9891            if (!p) {
9892                return -TARGET_EFAULT;
9893            }
9894            ret = get_errno(reboot(arg1, arg2, arg3, p));
9895            unlock_user(p, arg4, 0);
9896         } else {
9897            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9898         }
9899         return ret;
9900 #ifdef TARGET_NR_mmap
9901     case TARGET_NR_mmap:
9902 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9903     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9904     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9905     || defined(TARGET_S390X)
9906         {
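                 /* The old mmap syscall on these ABIs takes a single guest
                    pointer to a block of six arguments rather than passing
                    them individually.  */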
9907             abi_ulong *v;
9908             abi_ulong v1, v2, v3, v4, v5, v6;
9909             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9910                 return -TARGET_EFAULT;
9911             v1 = tswapal(v[0]);
9912             v2 = tswapal(v[1]);
9913             v3 = tswapal(v[2]);
9914             v4 = tswapal(v[3]);
9915             v5 = tswapal(v[4]);
9916             v6 = tswapal(v[5]);
9917             unlock_user(v, arg1, 0);
9918             ret = get_errno(target_mmap(v1, v2, v3,
9919                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9920                                         v5, v6));
9921         }
9922 #else
9923         /* mmap pointers are always untagged */
9924         ret = get_errno(target_mmap(arg1, arg2, arg3,
9925                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9926                                     arg5,
9927                                     arg6));
9928 #endif
9929         return ret;
9930 #endif
9931 #ifdef TARGET_NR_mmap2
9932     case TARGET_NR_mmap2:
9933 #ifndef MMAP_SHIFT
9934 #define MMAP_SHIFT 12
9935 #endif
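             /* mmap2 specifies the file offset in units of 1 << MMAP_SHIFT
                bytes (4096 by default), so scale it back to a byte offset
                for target_mmap().  */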
9936         ret = target_mmap(arg1, arg2, arg3,
9937                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9938                           arg5, arg6 << MMAP_SHIFT);
9939         return get_errno(ret);
9940 #endif
9941     case TARGET_NR_munmap:
9942         arg1 = cpu_untagged_addr(cpu, arg1);
9943         return get_errno(target_munmap(arg1, arg2));
9944     case TARGET_NR_mprotect:
9945         arg1 = cpu_untagged_addr(cpu, arg1);
9946         {
9947             TaskState *ts = cpu->opaque;
9948             /* Special hack to detect libc making the stack executable.  */
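                 /* PROT_GROWSDOWN applies the change down to the start of
                    the grows-down (stack) mapping, so extend the range to
                    the recorded stack limit and drop the flag.  */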
9949             if ((arg3 & PROT_GROWSDOWN)
9950                 && arg1 >= ts->info->stack_limit
9951                 && arg1 <= ts->info->start_stack) {
9952                 arg3 &= ~PROT_GROWSDOWN;
9953                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9954                 arg1 = ts->info->stack_limit;
9955             }
9956         }
9957         return get_errno(target_mprotect(arg1, arg2, arg3));
9958 #ifdef TARGET_NR_mremap
9959     case TARGET_NR_mremap:
9960         arg1 = cpu_untagged_addr(cpu, arg1);
9961         /* mremap new_addr (arg5) is always untagged */
9962         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9963 #endif
9964         /* ??? msync/mlock/munlock are broken for softmmu.  */
9965 #ifdef TARGET_NR_msync
9966     case TARGET_NR_msync:
9967         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9968 #endif
9969 #ifdef TARGET_NR_mlock
9970     case TARGET_NR_mlock:
9971         return get_errno(mlock(g2h(cpu, arg1), arg2));
9972 #endif
9973 #ifdef TARGET_NR_munlock
9974     case TARGET_NR_munlock:
9975         return get_errno(munlock(g2h(cpu, arg1), arg2));
9976 #endif
9977 #ifdef TARGET_NR_mlockall
9978     case TARGET_NR_mlockall:
9979         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9980 #endif
9981 #ifdef TARGET_NR_munlockall
9982     case TARGET_NR_munlockall:
9983         return get_errno(munlockall());
9984 #endif
9985 #ifdef TARGET_NR_truncate
9986     case TARGET_NR_truncate:
9987         if (!(p = lock_user_string(arg1)))
9988             return -TARGET_EFAULT;
9989         ret = get_errno(truncate(p, arg2));
9990         unlock_user(p, arg1, 0);
9991         return ret;
9992 #endif
9993 #ifdef TARGET_NR_ftruncate
9994     case TARGET_NR_ftruncate:
9995         return get_errno(ftruncate(arg1, arg2));
9996 #endif
9997     case TARGET_NR_fchmod:
9998         return get_errno(fchmod(arg1, arg2));
9999 #if defined(TARGET_NR_fchmodat)
10000     case TARGET_NR_fchmodat:
10001         if (!(p = lock_user_string(arg2)))
10002             return -TARGET_EFAULT;
10003         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10004         unlock_user(p, arg2, 0);
10005         return ret;
10006 #endif
10007     case TARGET_NR_getpriority:
10008         /* Note that negative values are valid for getpriority, so we must
10009            differentiate based on errno settings.  */
10010         errno = 0;
10011         ret = getpriority(arg1, arg2);
10012         if (ret == -1 && errno != 0) {
10013             return -host_to_target_errno(errno);
10014         }
10015 #ifdef TARGET_ALPHA
10016         /* Return value is the unbiased priority.  Signal no error.  */
10017         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10018 #else
10019         /* Return value is a biased priority to avoid negative numbers.  */
10020         ret = 20 - ret;
10021 #endif
10022         return ret;
10023     case TARGET_NR_setpriority:
10024         return get_errno(setpriority(arg1, arg2, arg3));
10025 #ifdef TARGET_NR_statfs
10026     case TARGET_NR_statfs:
10027         if (!(p = lock_user_string(arg1))) {
10028             return -TARGET_EFAULT;
10029         }
10030         ret = get_errno(statfs(path(p), &stfs));
10031         unlock_user(p, arg1, 0);
10032     convert_statfs:
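              /* TARGET_NR_fstatfs joins here to share the conversion of the
                 host statfs result into the target layout.  */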
10033         if (!is_error(ret)) {
10034             struct target_statfs *target_stfs;
10035 
10036             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10037                 return -TARGET_EFAULT;
10038             __put_user(stfs.f_type, &target_stfs->f_type);
10039             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10040             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10041             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10042             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10043             __put_user(stfs.f_files, &target_stfs->f_files);
10044             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10045             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10046             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10047             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10048             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10049 #ifdef _STATFS_F_FLAGS
10050             __put_user(stfs.f_flags, &target_stfs->f_flags);
10051 #else
10052             __put_user(0, &target_stfs->f_flags);
10053 #endif
10054             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10055             unlock_user_struct(target_stfs, arg2, 1);
10056         }
10057         return ret;
10058 #endif
10059 #ifdef TARGET_NR_fstatfs
10060     case TARGET_NR_fstatfs:
10061         ret = get_errno(fstatfs(arg1, &stfs));
10062         goto convert_statfs;
10063 #endif
10064 #ifdef TARGET_NR_statfs64
10065     case TARGET_NR_statfs64:
10066         if (!(p = lock_user_string(arg1))) {
10067             return -TARGET_EFAULT;
10068         }
10069         ret = get_errno(statfs(path(p), &stfs));
10070         unlock_user(p, arg1, 0);
10071     convert_statfs64:
10072         if (!is_error(ret)) {
10073             struct target_statfs64 *target_stfs;
10074 
10075             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10076                 return -TARGET_EFAULT;
10077             __put_user(stfs.f_type, &target_stfs->f_type);
10078             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10079             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10080             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10081             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10082             __put_user(stfs.f_files, &target_stfs->f_files);
10083             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10084             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10085             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10086             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10087             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10088 #ifdef _STATFS_F_FLAGS
10089             __put_user(stfs.f_flags, &target_stfs->f_flags);
10090 #else
10091             __put_user(0, &target_stfs->f_flags);
10092 #endif
10093             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10094             unlock_user_struct(target_stfs, arg3, 1);
10095         }
10096         return ret;
10097     case TARGET_NR_fstatfs64:
10098         ret = get_errno(fstatfs(arg1, &stfs));
10099         goto convert_statfs64;
10100 #endif
10101 #ifdef TARGET_NR_socketcall
10102     case TARGET_NR_socketcall:
10103         return do_socketcall(arg1, arg2);
10104 #endif
10105 #ifdef TARGET_NR_accept
10106     case TARGET_NR_accept:
10107         return do_accept4(arg1, arg2, arg3, 0);
10108 #endif
10109 #ifdef TARGET_NR_accept4
10110     case TARGET_NR_accept4:
10111         return do_accept4(arg1, arg2, arg3, arg4);
10112 #endif
10113 #ifdef TARGET_NR_bind
10114     case TARGET_NR_bind:
10115         return do_bind(arg1, arg2, arg3);
10116 #endif
10117 #ifdef TARGET_NR_connect
10118     case TARGET_NR_connect:
10119         return do_connect(arg1, arg2, arg3);
10120 #endif
10121 #ifdef TARGET_NR_getpeername
10122     case TARGET_NR_getpeername:
10123         return do_getpeername(arg1, arg2, arg3);
10124 #endif
10125 #ifdef TARGET_NR_getsockname
10126     case TARGET_NR_getsockname:
10127         return do_getsockname(arg1, arg2, arg3);
10128 #endif
10129 #ifdef TARGET_NR_getsockopt
10130     case TARGET_NR_getsockopt:
10131         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10132 #endif
10133 #ifdef TARGET_NR_listen
10134     case TARGET_NR_listen:
10135         return get_errno(listen(arg1, arg2));
10136 #endif
10137 #ifdef TARGET_NR_recv
10138     case TARGET_NR_recv:
10139         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10140 #endif
10141 #ifdef TARGET_NR_recvfrom
10142     case TARGET_NR_recvfrom:
10143         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10144 #endif
10145 #ifdef TARGET_NR_recvmsg
10146     case TARGET_NR_recvmsg:
10147         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10148 #endif
10149 #ifdef TARGET_NR_send
10150     case TARGET_NR_send:
10151         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10152 #endif
10153 #ifdef TARGET_NR_sendmsg
10154     case TARGET_NR_sendmsg:
10155         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10156 #endif
10157 #ifdef TARGET_NR_sendmmsg
10158     case TARGET_NR_sendmmsg:
10159         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10160 #endif
10161 #ifdef TARGET_NR_recvmmsg
10162     case TARGET_NR_recvmmsg:
10163         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10164 #endif
10165 #ifdef TARGET_NR_sendto
10166     case TARGET_NR_sendto:
10167         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10168 #endif
10169 #ifdef TARGET_NR_shutdown
10170     case TARGET_NR_shutdown:
10171         return get_errno(shutdown(arg1, arg2));
10172 #endif
10173 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10174     case TARGET_NR_getrandom:
10175         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10176         if (!p) {
10177             return -TARGET_EFAULT;
10178         }
10179         ret = get_errno(getrandom(p, arg2, arg3));
10180         unlock_user(p, arg1, ret);
10181         return ret;
10182 #endif
10183 #ifdef TARGET_NR_socket
10184     case TARGET_NR_socket:
10185         return do_socket(arg1, arg2, arg3);
10186 #endif
10187 #ifdef TARGET_NR_socketpair
10188     case TARGET_NR_socketpair:
10189         return do_socketpair(arg1, arg2, arg3, arg4);
10190 #endif
10191 #ifdef TARGET_NR_setsockopt
10192     case TARGET_NR_setsockopt:
10193         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10194 #endif
10195 #if defined(TARGET_NR_syslog)
10196     case TARGET_NR_syslog:
10197         {
10198             int len = arg2;
10199 
10200             switch (arg1) {
10201             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10202             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10203             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10204             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10205             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10206             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10207             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10208             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10209                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10210             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10211             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10212             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10213                 {
10214                     if (len < 0) {
10215                         return -TARGET_EINVAL;
10216                     }
10217                     if (len == 0) {
10218                         return 0;
10219                     }
10220                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10221                     if (!p) {
10222                         return -TARGET_EFAULT;
10223                     }
10224                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10225                     unlock_user(p, arg2, arg3);
10226                 }
10227                 return ret;
10228             default:
10229                 return -TARGET_EINVAL;
10230             }
10231         }
10232         break;
10233 #endif
10234     case TARGET_NR_setitimer:
10235         {
10236             struct itimerval value, ovalue, *pvalue;
10237 
10238             if (arg2) {
10239                 pvalue = &value;
10240                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10241                     || copy_from_user_timeval(&pvalue->it_value,
10242                                               arg2 + sizeof(struct target_timeval)))
10243                     return -TARGET_EFAULT;
10244             } else {
10245                 pvalue = NULL;
10246             }
10247             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10248             if (!is_error(ret) && arg3) {
10249                 if (copy_to_user_timeval(arg3,
10250                                          &ovalue.it_interval)
10251                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10252                                             &ovalue.it_value))
10253                     return -TARGET_EFAULT;
10254             }
10255         }
10256         return ret;
10257     case TARGET_NR_getitimer:
10258         {
10259             struct itimerval value;
10260 
10261             ret = get_errno(getitimer(arg1, &value));
10262             if (!is_error(ret) && arg2) {
10263                 if (copy_to_user_timeval(arg2,
10264                                          &value.it_interval)
10265                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10266                                             &value.it_value))
10267                     return -TARGET_EFAULT;
10268             }
10269         }
10270         return ret;
10271 #ifdef TARGET_NR_stat
10272     case TARGET_NR_stat:
10273         if (!(p = lock_user_string(arg1))) {
10274             return -TARGET_EFAULT;
10275         }
10276         ret = get_errno(stat(path(p), &st));
10277         unlock_user(p, arg1, 0);
10278         goto do_stat;
10279 #endif
10280 #ifdef TARGET_NR_lstat
10281     case TARGET_NR_lstat:
10282         if (!(p = lock_user_string(arg1))) {
10283             return -TARGET_EFAULT;
10284         }
10285         ret = get_errno(lstat(path(p), &st));
10286         unlock_user(p, arg1, 0);
10287         goto do_stat;
10288 #endif
10289 #ifdef TARGET_NR_fstat
10290     case TARGET_NR_fstat:
10291         {
10292             ret = get_errno(fstat(arg1, &st));
10293 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10294         do_stat:
10295 #endif
10296             if (!is_error(ret)) {
10297                 struct target_stat *target_st;
10298 
10299                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10300                     return -TARGET_EFAULT;
10301                 memset(target_st, 0, sizeof(*target_st));
10302                 __put_user(st.st_dev, &target_st->st_dev);
10303                 __put_user(st.st_ino, &target_st->st_ino);
10304                 __put_user(st.st_mode, &target_st->st_mode);
10305                 __put_user(st.st_uid, &target_st->st_uid);
10306                 __put_user(st.st_gid, &target_st->st_gid);
10307                 __put_user(st.st_nlink, &target_st->st_nlink);
10308                 __put_user(st.st_rdev, &target_st->st_rdev);
10309                 __put_user(st.st_size, &target_st->st_size);
10310                 __put_user(st.st_blksize, &target_st->st_blksize);
10311                 __put_user(st.st_blocks, &target_st->st_blocks);
10312                 __put_user(st.st_atime, &target_st->target_st_atime);
10313                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10314                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10315 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10316                 __put_user(st.st_atim.tv_nsec,
10317                            &target_st->target_st_atime_nsec);
10318                 __put_user(st.st_mtim.tv_nsec,
10319                            &target_st->target_st_mtime_nsec);
10320                 __put_user(st.st_ctim.tv_nsec,
10321                            &target_st->target_st_ctime_nsec);
10322 #endif
10323                 unlock_user_struct(target_st, arg2, 1);
10324             }
10325         }
10326         return ret;
10327 #endif
10328     case TARGET_NR_vhangup:
10329         return get_errno(vhangup());
10330 #ifdef TARGET_NR_syscall
10331     case TARGET_NR_syscall:
10332         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10333                           arg6, arg7, arg8, 0);
10334 #endif
10335 #if defined(TARGET_NR_wait4)
10336     case TARGET_NR_wait4:
10337         {
10338             int status;
10339             abi_long status_ptr = arg2;
10340             struct rusage rusage, *rusage_ptr;
10341             abi_ulong target_rusage = arg4;
10342             abi_long rusage_err;
10343             if (target_rusage)
10344                 rusage_ptr = &rusage;
10345             else
10346                 rusage_ptr = NULL;
10347             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10348             if (!is_error(ret)) {
10349                 if (status_ptr && ret) {
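                      /* Only copy the status back when a child was actually
                         reaped; with WNOHANG wait4() can return 0 without
                         writing it.  */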
10350                     status = host_to_target_waitstatus(status);
10351                     if (put_user_s32(status, status_ptr))
10352                         return -TARGET_EFAULT;
10353                 }
10354                 if (target_rusage) {
10355                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10356                     if (rusage_err) {
10357                         ret = rusage_err;
10358                     }
10359                 }
10360             }
10361         }
10362         return ret;
10363 #endif
10364 #ifdef TARGET_NR_swapoff
10365     case TARGET_NR_swapoff:
10366         if (!(p = lock_user_string(arg1)))
10367             return -TARGET_EFAULT;
10368         ret = get_errno(swapoff(p));
10369         unlock_user(p, arg1, 0);
10370         return ret;
10371 #endif
10372     case TARGET_NR_sysinfo:
10373         {
10374             struct target_sysinfo *target_value;
10375             struct sysinfo value;
10376             ret = get_errno(sysinfo(&value));
10377             if (!is_error(ret) && arg1)
10378             {
10379                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10380                     return -TARGET_EFAULT;
10381                 __put_user(value.uptime, &target_value->uptime);
10382                 __put_user(value.loads[0], &target_value->loads[0]);
10383                 __put_user(value.loads[1], &target_value->loads[1]);
10384                 __put_user(value.loads[2], &target_value->loads[2]);
10385                 __put_user(value.totalram, &target_value->totalram);
10386                 __put_user(value.freeram, &target_value->freeram);
10387                 __put_user(value.sharedram, &target_value->sharedram);
10388                 __put_user(value.bufferram, &target_value->bufferram);
10389                 __put_user(value.totalswap, &target_value->totalswap);
10390                 __put_user(value.freeswap, &target_value->freeswap);
10391                 __put_user(value.procs, &target_value->procs);
10392                 __put_user(value.totalhigh, &target_value->totalhigh);
10393                 __put_user(value.freehigh, &target_value->freehigh);
10394                 __put_user(value.mem_unit, &target_value->mem_unit);
10395                 unlock_user_struct(target_value, arg1, 1);
10396             }
10397         }
10398         return ret;
10399 #ifdef TARGET_NR_ipc
10400     case TARGET_NR_ipc:
10401         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10402 #endif
10403 #ifdef TARGET_NR_semget
10404     case TARGET_NR_semget:
10405         return get_errno(semget(arg1, arg2, arg3));
10406 #endif
10407 #ifdef TARGET_NR_semop
10408     case TARGET_NR_semop:
10409         return do_semtimedop(arg1, arg2, arg3, 0, false);
10410 #endif
10411 #ifdef TARGET_NR_semtimedop
10412     case TARGET_NR_semtimedop:
10413         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10414 #endif
10415 #ifdef TARGET_NR_semtimedop_time64
10416     case TARGET_NR_semtimedop_time64:
10417         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10418 #endif
10419 #ifdef TARGET_NR_semctl
10420     case TARGET_NR_semctl:
10421         return do_semctl(arg1, arg2, arg3, arg4);
10422 #endif
10423 #ifdef TARGET_NR_msgctl
10424     case TARGET_NR_msgctl:
10425         return do_msgctl(arg1, arg2, arg3);
10426 #endif
10427 #ifdef TARGET_NR_msgget
10428     case TARGET_NR_msgget:
10429         return get_errno(msgget(arg1, arg2));
10430 #endif
10431 #ifdef TARGET_NR_msgrcv
10432     case TARGET_NR_msgrcv:
10433         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10434 #endif
10435 #ifdef TARGET_NR_msgsnd
10436     case TARGET_NR_msgsnd:
10437         return do_msgsnd(arg1, arg2, arg3, arg4);
10438 #endif
10439 #ifdef TARGET_NR_shmget
10440     case TARGET_NR_shmget:
10441         return get_errno(shmget(arg1, arg2, arg3));
10442 #endif
10443 #ifdef TARGET_NR_shmctl
10444     case TARGET_NR_shmctl:
10445         return do_shmctl(arg1, arg2, arg3);
10446 #endif
10447 #ifdef TARGET_NR_shmat
10448     case TARGET_NR_shmat:
10449         return do_shmat(cpu_env, arg1, arg2, arg3);
10450 #endif
10451 #ifdef TARGET_NR_shmdt
10452     case TARGET_NR_shmdt:
10453         return do_shmdt(arg1);
10454 #endif
10455     case TARGET_NR_fsync:
10456         return get_errno(fsync(arg1));
10457     case TARGET_NR_clone:
10458         /* Linux manages to have three different orderings for its
10459          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10460          * match the kernel's CONFIG_CLONE_* settings.
10461          * Microblaze is further special in that it uses a sixth
10462          * implicit argument to clone for the TLS pointer.
10463          */
10464 #if defined(TARGET_MICROBLAZE)
10465         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10466 #elif defined(TARGET_CLONE_BACKWARDS)
10467         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10468 #elif defined(TARGET_CLONE_BACKWARDS2)
10469         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10470 #else
10471         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10472 #endif
10473         return ret;
10474 #ifdef __NR_exit_group
10475         /* new thread calls */
10476     case TARGET_NR_exit_group:
10477         preexit_cleanup(cpu_env, arg1);
10478         return get_errno(exit_group(arg1));
10479 #endif
10480     case TARGET_NR_setdomainname:
10481         if (!(p = lock_user_string(arg1)))
10482             return -TARGET_EFAULT;
10483         ret = get_errno(setdomainname(p, arg2));
10484         unlock_user(p, arg1, 0);
10485         return ret;
10486     case TARGET_NR_uname:
10487         /* no need to transcode because we use the linux syscall */
10488         {
10489             struct new_utsname * buf;
10490 
10491             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10492                 return -TARGET_EFAULT;
10493             ret = get_errno(sys_uname(buf));
10494             if (!is_error(ret)) {
10495                 /* Overwrite the native machine name with whatever is being
10496                    emulated. */
10497                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10498                           sizeof(buf->machine));
10499                 /* Allow the user to override the reported release.  */
10500                 if (qemu_uname_release && *qemu_uname_release) {
10501                     g_strlcpy(buf->release, qemu_uname_release,
10502                               sizeof(buf->release));
10503                 }
10504             }
10505             unlock_user_struct(buf, arg1, 1);
10506         }
10507         return ret;
10508 #ifdef TARGET_I386
10509     case TARGET_NR_modify_ldt:
10510         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10511 #if !defined(TARGET_X86_64)
10512     case TARGET_NR_vm86:
10513         return do_vm86(cpu_env, arg1, arg2);
10514 #endif
10515 #endif
10516 #if defined(TARGET_NR_adjtimex)
10517     case TARGET_NR_adjtimex:
10518         {
10519             struct timex host_buf;
10520 
10521             if (target_to_host_timex(&host_buf, arg1) != 0) {
10522                 return -TARGET_EFAULT;
10523             }
10524             ret = get_errno(adjtimex(&host_buf));
10525             if (!is_error(ret)) {
10526                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10527                     return -TARGET_EFAULT;
10528                 }
10529             }
10530         }
10531         return ret;
10532 #endif
10533 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10534     case TARGET_NR_clock_adjtime:
10535         {
10536             struct timex htx, *phtx = &htx;
10537 
10538             if (target_to_host_timex(phtx, arg2) != 0) {
10539                 return -TARGET_EFAULT;
10540             }
10541             ret = get_errno(clock_adjtime(arg1, phtx));
10542             if (!is_error(ret) && phtx) {
10543                 if (host_to_target_timex(arg2, phtx) != 0) {
10544                     return -TARGET_EFAULT;
10545                 }
10546             }
10547         }
10548         return ret;
10549 #endif
10550 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10551     case TARGET_NR_clock_adjtime64:
10552         {
10553             struct timex htx;
10554 
10555             if (target_to_host_timex64(&htx, arg2) != 0) {
10556                 return -TARGET_EFAULT;
10557             }
10558             ret = get_errno(clock_adjtime(arg1, &htx));
10559             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10560                     return -TARGET_EFAULT;
10561             }
10562         }
10563         return ret;
10564 #endif
10565     case TARGET_NR_getpgid:
10566         return get_errno(getpgid(arg1));
10567     case TARGET_NR_fchdir:
10568         return get_errno(fchdir(arg1));
10569     case TARGET_NR_personality:
10570         return get_errno(personality(arg1));
10571 #ifdef TARGET_NR__llseek /* Not on alpha */
10572     case TARGET_NR__llseek:
10573         {
10574             int64_t res;
10575 #if !defined(__NR_llseek)
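              /* Hosts without an llseek syscall are 64-bit, so build the
                 offset from the high (arg2) and low (arg3) halves and use
                 plain lseek directly.  */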
10576             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10577             if (res == -1) {
10578                 ret = get_errno(res);
10579             } else {
10580                 ret = 0;
10581             }
10582 #else
10583             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10584 #endif
10585             if ((ret == 0) && put_user_s64(res, arg4)) {
10586                 return -TARGET_EFAULT;
10587             }
10588         }
10589         return ret;
10590 #endif
10591 #ifdef TARGET_NR_getdents
10592     case TARGET_NR_getdents:
10593         return do_getdents(arg1, arg2, arg3);
10594 #endif /* TARGET_NR_getdents */
10595 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10596     case TARGET_NR_getdents64:
10597         return do_getdents64(arg1, arg2, arg3);
10598 #endif /* TARGET_NR_getdents64 */
10599 #if defined(TARGET_NR__newselect)
10600     case TARGET_NR__newselect:
10601         return do_select(arg1, arg2, arg3, arg4, arg5);
10602 #endif
10603 #ifdef TARGET_NR_poll
10604     case TARGET_NR_poll:
10605         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10606 #endif
10607 #ifdef TARGET_NR_ppoll
10608     case TARGET_NR_ppoll:
10609         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10610 #endif
10611 #ifdef TARGET_NR_ppoll_time64
10612     case TARGET_NR_ppoll_time64:
10613         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10614 #endif
10615     case TARGET_NR_flock:
10616         /* NOTE: the flock constants are the same on every Linux
10617            platform, so the argument needs no translation.  */
10618         return get_errno(safe_flock(arg1, arg2));
10619     case TARGET_NR_readv:
10620         {
10621             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10622             if (vec != NULL) {
10623                 ret = get_errno(safe_readv(arg1, vec, arg3));
10624                 unlock_iovec(vec, arg2, arg3, 1);
10625             } else {
10626                 ret = -host_to_target_errno(errno);
10627             }
10628         }
10629         return ret;
10630     case TARGET_NR_writev:
10631         {
10632             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10633             if (vec != NULL) {
10634                 ret = get_errno(safe_writev(arg1, vec, arg3));
10635                 unlock_iovec(vec, arg2, arg3, 0);
10636             } else {
10637                 ret = -host_to_target_errno(errno);
10638             }
10639         }
10640         return ret;
10641 #if defined(TARGET_NR_preadv)
10642     case TARGET_NR_preadv:
10643         {
10644             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10645             if (vec != NULL) {
10646                 unsigned long low, high;
10647 
10648                 target_to_host_low_high(arg4, arg5, &low, &high);
10649                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10650                 unlock_iovec(vec, arg2, arg3, 1);
10651             } else {
10652                 ret = -host_to_target_errno(errno);
10653             }
10654         }
10655         return ret;
10656 #endif
10657 #if defined(TARGET_NR_pwritev)
10658     case TARGET_NR_pwritev:
10659         {
10660             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10661             if (vec != NULL) {
10662                 unsigned long low, high;
10663 
10664                 target_to_host_low_high(arg4, arg5, &low, &high);
10665                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10666                 unlock_iovec(vec, arg2, arg3, 0);
10667             } else {
10668                 ret = -host_to_target_errno(errno);
10669             }
10670         }
10671         return ret;
10672 #endif
10673     case TARGET_NR_getsid:
10674         return get_errno(getsid(arg1));
10675 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10676     case TARGET_NR_fdatasync:
10677         return get_errno(fdatasync(arg1));
10678 #endif
10679     case TARGET_NR_sched_getaffinity:
10680         {
10681             unsigned int mask_size;
10682             unsigned long *mask;
10683 
10684             /*
10685              * sched_getaffinity needs multiples of ulong, so need to take
10686              * care of mismatches between target ulong and host ulong sizes.
10687              */
10688             if (arg2 & (sizeof(abi_ulong) - 1)) {
10689                 return -TARGET_EINVAL;
10690             }
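                  /*
                   * Round the request up to a whole number of host longs,
                   * e.g. a 4-byte mask from a 32-bit guest becomes an 8-byte
                   * host mask on a 64-bit host.
                   */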
10691             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10692 
10693             mask = alloca(mask_size);
10694             memset(mask, 0, mask_size);
10695             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10696 
10697             if (!is_error(ret)) {
10698                 if (ret > arg2) {
10699                     /* More data was returned than will fit in the caller's buffer.
10700                      * This only happens if sizeof(abi_long) < sizeof(long)
10701                      * and the caller passed us a buffer holding an odd number
10702                      * of abi_longs. If the host kernel is actually using the
10703                      * extra 4 bytes then fail EINVAL; otherwise we can just
10704                      * ignore them and only copy the interesting part.
10705                      */
10706                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10707                     if (numcpus > arg2 * 8) {
10708                         return -TARGET_EINVAL;
10709                     }
10710                     ret = arg2;
10711                 }
10712 
10713                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10714                     return -TARGET_EFAULT;
10715                 }
10716             }
10717         }
10718         return ret;
10719     case TARGET_NR_sched_setaffinity:
10720         {
10721             unsigned int mask_size;
10722             unsigned long *mask;
10723 
10724             /*
10725              * sched_setaffinity needs multiples of ulong, so need to take
10726              * care of mismatches between target ulong and host ulong sizes.
10727              */
10728             if (arg2 & (sizeof(abi_ulong) - 1)) {
10729                 return -TARGET_EINVAL;
10730             }
10731             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10732             mask = alloca(mask_size);
10733 
10734             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10735             if (ret) {
10736                 return ret;
10737             }
10738 
10739             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10740         }
10741     case TARGET_NR_getcpu:
10742         {
10743             unsigned cpu, node;
10744             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10745                                        arg2 ? &node : NULL,
10746                                        NULL));
10747             if (is_error(ret)) {
10748                 return ret;
10749             }
10750             if (arg1 && put_user_u32(cpu, arg1)) {
10751                 return -TARGET_EFAULT;
10752             }
10753             if (arg2 && put_user_u32(node, arg2)) {
10754                 return -TARGET_EFAULT;
10755             }
10756         }
10757         return ret;
10758     case TARGET_NR_sched_setparam:
10759         {
10760             struct sched_param *target_schp;
10761             struct sched_param schp;
10762 
10763             if (arg2 == 0) {
10764                 return -TARGET_EINVAL;
10765             }
10766             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10767                 return -TARGET_EFAULT;
10768             schp.sched_priority = tswap32(target_schp->sched_priority);
10769             unlock_user_struct(target_schp, arg2, 0);
10770             return get_errno(sched_setparam(arg1, &schp));
10771         }
10772     case TARGET_NR_sched_getparam:
10773         {
10774             struct sched_param *target_schp;
10775             struct sched_param schp;
10776 
10777             if (arg2 == 0) {
10778                 return -TARGET_EINVAL;
10779             }
10780             ret = get_errno(sched_getparam(arg1, &schp));
10781             if (!is_error(ret)) {
10782                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10783                     return -TARGET_EFAULT;
10784                 target_schp->sched_priority = tswap32(schp.sched_priority);
10785                 unlock_user_struct(target_schp, arg2, 1);
10786             }
10787         }
10788         return ret;
10789     case TARGET_NR_sched_setscheduler:
10790         {
10791             struct sched_param *target_schp;
10792             struct sched_param schp;
10793             if (arg3 == 0) {
10794                 return -TARGET_EINVAL;
10795             }
10796             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10797                 return -TARGET_EFAULT;
10798             schp.sched_priority = tswap32(target_schp->sched_priority);
10799             unlock_user_struct(target_schp, arg3, 0);
10800             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10801         }
10802     case TARGET_NR_sched_getscheduler:
10803         return get_errno(sched_getscheduler(arg1));
10804     case TARGET_NR_sched_yield:
10805         return get_errno(sched_yield());
10806     case TARGET_NR_sched_get_priority_max:
10807         return get_errno(sched_get_priority_max(arg1));
10808     case TARGET_NR_sched_get_priority_min:
10809         return get_errno(sched_get_priority_min(arg1));
10810 #ifdef TARGET_NR_sched_rr_get_interval
10811     case TARGET_NR_sched_rr_get_interval:
10812         {
10813             struct timespec ts;
10814             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10815             if (!is_error(ret)) {
10816                 ret = host_to_target_timespec(arg2, &ts);
10817             }
10818         }
10819         return ret;
10820 #endif
10821 #ifdef TARGET_NR_sched_rr_get_interval_time64
10822     case TARGET_NR_sched_rr_get_interval_time64:
10823         {
10824             struct timespec ts;
10825             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10826             if (!is_error(ret)) {
10827                 ret = host_to_target_timespec64(arg2, &ts);
10828             }
10829         }
10830         return ret;
10831 #endif
10832 #if defined(TARGET_NR_nanosleep)
10833     case TARGET_NR_nanosleep:
10834         {
10835             struct timespec req, rem;
10836             target_to_host_timespec(&req, arg1);
10837             ret = get_errno(safe_nanosleep(&req, &rem));
10838             if (is_error(ret) && arg2) {
10839                 host_to_target_timespec(arg2, &rem);
10840             }
10841         }
10842         return ret;
10843 #endif
10844     case TARGET_NR_prctl:
10845         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10847 #ifdef TARGET_NR_arch_prctl
10848     case TARGET_NR_arch_prctl:
10849         return do_arch_prctl(cpu_env, arg1, arg2);
10850 #endif
10851 #ifdef TARGET_NR_pread64
10852     case TARGET_NR_pread64:
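              /*
               * On ABIs that pass 64-bit syscall arguments in aligned register
               * pairs, a padding slot precedes the offset, so its two halves
               * arrive one argument later (pwrite64 below does the same).
               */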
10853         if (regpairs_aligned(cpu_env, num)) {
10854             arg4 = arg5;
10855             arg5 = arg6;
10856         }
10857         if (arg2 == 0 && arg3 == 0) {
10858             /* Special-case NULL buffer and zero length, which should succeed */
10859             p = 0;
10860         } else {
10861             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10862             if (!p) {
10863                 return -TARGET_EFAULT;
10864             }
10865         }
10866         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10867         unlock_user(p, arg2, ret);
10868         return ret;
10869     case TARGET_NR_pwrite64:
10870         if (regpairs_aligned(cpu_env, num)) {
10871             arg4 = arg5;
10872             arg5 = arg6;
10873         }
10874         if (arg2 == 0 && arg3 == 0) {
10875             /* Special-case NULL buffer and zero length, which should succeed */
10876             p = 0;
10877         } else {
10878             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10879             if (!p) {
10880                 return -TARGET_EFAULT;
10881             }
10882         }
10883         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10884         unlock_user(p, arg2, 0);
10885         return ret;
10886 #endif
10887     case TARGET_NR_getcwd:
10888         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10889             return -TARGET_EFAULT;
10890         ret = get_errno(sys_getcwd1(p, arg2));
10891         unlock_user(p, arg1, ret);
10892         return ret;
10893     case TARGET_NR_capget:
10894     case TARGET_NR_capset:
10895     {
10896         struct target_user_cap_header *target_header;
10897         struct target_user_cap_data *target_data = NULL;
10898         struct __user_cap_header_struct header;
10899         struct __user_cap_data_struct data[2];
10900         struct __user_cap_data_struct *dataptr = NULL;
10901         int i, target_datalen;
10902         int data_items = 1;
10903 
10904         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10905             return -TARGET_EFAULT;
10906         }
10907         header.version = tswap32(target_header->version);
10908         header.pid = tswap32(target_header->pid);
10909 
10910         if (header.version != _LINUX_CAPABILITY_VERSION) {
10911             /* Versions 2 and up take a pointer to two user_data structs */
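                  /* Two 32-bit sets cover the 64 capability bits of v2/v3. */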
10912             data_items = 2;
10913         }
10914 
10915         target_datalen = sizeof(*target_data) * data_items;
10916 
10917         if (arg2) {
10918             if (num == TARGET_NR_capget) {
10919                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10920             } else {
10921                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10922             }
10923             if (!target_data) {
10924                 unlock_user_struct(target_header, arg1, 0);
10925                 return -TARGET_EFAULT;
10926             }
10927 
10928             if (num == TARGET_NR_capset) {
10929                 for (i = 0; i < data_items; i++) {
10930                     data[i].effective = tswap32(target_data[i].effective);
10931                     data[i].permitted = tswap32(target_data[i].permitted);
10932                     data[i].inheritable = tswap32(target_data[i].inheritable);
10933                 }
10934             }
10935 
10936             dataptr = data;
10937         }
10938 
10939         if (num == TARGET_NR_capget) {
10940             ret = get_errno(capget(&header, dataptr));
10941         } else {
10942             ret = get_errno(capset(&header, dataptr));
10943         }
10944 
10945         /* The kernel always updates version for both capget and capset */
10946         target_header->version = tswap32(header.version);
10947         unlock_user_struct(target_header, arg1, 1);
10948 
10949         if (arg2) {
10950             if (num == TARGET_NR_capget) {
10951                 for (i = 0; i < data_items; i++) {
10952                     target_data[i].effective = tswap32(data[i].effective);
10953                     target_data[i].permitted = tswap32(data[i].permitted);
10954                     target_data[i].inheritable = tswap32(data[i].inheritable);
10955                 }
10956                 unlock_user(target_data, arg2, target_datalen);
10957             } else {
10958                 unlock_user(target_data, arg2, 0);
10959             }
10960         }
10961         return ret;
10962     }
10963     case TARGET_NR_sigaltstack:
10964         return do_sigaltstack(arg1, arg2, cpu_env);
10965 
10966 #ifdef CONFIG_SENDFILE
10967 #ifdef TARGET_NR_sendfile
10968     case TARGET_NR_sendfile:
10969     {
10970         off_t *offp = NULL;
10971         off_t off;
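              /*
               * Plain sendfile passes the offset as a target abi_long;
               * sendfile64 below always uses a full 64-bit value.
               */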
10972         if (arg3) {
10973             ret = get_user_sal(off, arg3);
10974             if (is_error(ret)) {
10975                 return ret;
10976             }
10977             offp = &off;
10978         }
10979         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10980         if (!is_error(ret) && arg3) {
10981             abi_long ret2 = put_user_sal(off, arg3);
10982             if (is_error(ret2)) {
10983                 ret = ret2;
10984             }
10985         }
10986         return ret;
10987     }
10988 #endif
10989 #ifdef TARGET_NR_sendfile64
10990     case TARGET_NR_sendfile64:
10991     {
10992         off_t *offp = NULL;
10993         off_t off;
10994         if (arg3) {
10995             ret = get_user_s64(off, arg3);
10996             if (is_error(ret)) {
10997                 return ret;
10998             }
10999             offp = &off;
11000         }
11001         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11002         if (!is_error(ret) && arg3) {
11003             abi_long ret2 = put_user_s64(off, arg3);
11004             if (is_error(ret2)) {
11005                 ret = ret2;
11006             }
11007         }
11008         return ret;
11009     }
11010 #endif
11011 #endif
11012 #ifdef TARGET_NR_vfork
11013     case TARGET_NR_vfork:
11014         return get_errno(do_fork(cpu_env,
11015                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11016                          0, 0, 0, 0));
11017 #endif
11018 #ifdef TARGET_NR_ugetrlimit
11019     case TARGET_NR_ugetrlimit:
11020     {
11021         struct rlimit rlim;
11022         int resource = target_to_host_resource(arg1);
11023         ret = get_errno(getrlimit(resource, &rlim));
11024         if (!is_error(ret)) {
11025             struct target_rlimit *target_rlim;
11026             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11027                 return -TARGET_EFAULT;
11028             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11029             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11030             unlock_user_struct(target_rlim, arg2, 1);
11031         }
11032         return ret;
11033     }
11034 #endif
11035 #ifdef TARGET_NR_truncate64
11036     case TARGET_NR_truncate64:
11037         if (!(p = lock_user_string(arg1)))
11038             return -TARGET_EFAULT;
11039         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11040         unlock_user(p, arg1, 0);
11041         return ret;
11042 #endif
11043 #ifdef TARGET_NR_ftruncate64
11044     case TARGET_NR_ftruncate64:
11045         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11046 #endif
11047 #ifdef TARGET_NR_stat64
11048     case TARGET_NR_stat64:
11049         if (!(p = lock_user_string(arg1))) {
11050             return -TARGET_EFAULT;
11051         }
11052         ret = get_errno(stat(path(p), &st));
11053         unlock_user(p, arg1, 0);
11054         if (!is_error(ret))
11055             ret = host_to_target_stat64(cpu_env, arg2, &st);
11056         return ret;
11057 #endif
11058 #ifdef TARGET_NR_lstat64
11059     case TARGET_NR_lstat64:
11060         if (!(p = lock_user_string(arg1))) {
11061             return -TARGET_EFAULT;
11062         }
11063         ret = get_errno(lstat(path(p), &st));
11064         unlock_user(p, arg1, 0);
11065         if (!is_error(ret))
11066             ret = host_to_target_stat64(cpu_env, arg2, &st);
11067         return ret;
11068 #endif
11069 #ifdef TARGET_NR_fstat64
11070     case TARGET_NR_fstat64:
11071         ret = get_errno(fstat(arg1, &st));
11072         if (!is_error(ret))
11073             ret = host_to_target_stat64(cpu_env, arg2, &st);
11074         return ret;
11075 #endif
11076 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11077 #ifdef TARGET_NR_fstatat64
11078     case TARGET_NR_fstatat64:
11079 #endif
11080 #ifdef TARGET_NR_newfstatat
11081     case TARGET_NR_newfstatat:
11082 #endif
11083         if (!(p = lock_user_string(arg2))) {
11084             return -TARGET_EFAULT;
11085         }
11086         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11087         unlock_user(p, arg2, 0);
11088         if (!is_error(ret))
11089             ret = host_to_target_stat64(cpu_env, arg3, &st);
11090         return ret;
11091 #endif
11092 #if defined(TARGET_NR_statx)
11093     case TARGET_NR_statx:
11094         {
11095             struct target_statx *target_stx;
11096             int dirfd = arg1;
11097             int flags = arg3;
11098 
11099             p = lock_user_string(arg2);
11100             if (p == NULL) {
11101                 return -TARGET_EFAULT;
11102             }
11103 #if defined(__NR_statx)
11104             {
11105                 /*
11106                  * It is assumed that struct statx is architecture independent.
11107                  */
11108                 struct target_statx host_stx;
11109                 int mask = arg4;
11110 
11111                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11112                 if (!is_error(ret)) {
11113                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11114                         unlock_user(p, arg2, 0);
11115                         return -TARGET_EFAULT;
11116                     }
11117                 }
11118 
11119                 if (ret != -TARGET_ENOSYS) {
11120                     unlock_user(p, arg2, 0);
11121                     return ret;
11122                 }
11123             }
11124 #endif
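                  /*
                   * Fallback when the host lacks statx(): use fstatat() and
                   * fill in only the fields a plain struct stat provides; the
                   * rest stay zero from the memset below.
                   */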
11125             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11126             unlock_user(p, arg2, 0);
11127 
11128             if (!is_error(ret)) {
11129                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11130                     return -TARGET_EFAULT;
11131                 }
11132                 memset(target_stx, 0, sizeof(*target_stx));
11133                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11134                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11135                 __put_user(st.st_ino, &target_stx->stx_ino);
11136                 __put_user(st.st_mode, &target_stx->stx_mode);
11137                 __put_user(st.st_uid, &target_stx->stx_uid);
11138                 __put_user(st.st_gid, &target_stx->stx_gid);
11139                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11140                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11141                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11142                 __put_user(st.st_size, &target_stx->stx_size);
11143                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11144                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11145                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11146                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11147                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11148                 unlock_user_struct(target_stx, arg5, 1);
11149             }
11150         }
11151         return ret;
11152 #endif
11153 #ifdef TARGET_NR_lchown
11154     case TARGET_NR_lchown:
11155         if (!(p = lock_user_string(arg1)))
11156             return -TARGET_EFAULT;
11157         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11158         unlock_user(p, arg1, 0);
11159         return ret;
11160 #endif
11161 #ifdef TARGET_NR_getuid
11162     case TARGET_NR_getuid:
11163         return get_errno(high2lowuid(getuid()));
11164 #endif
11165 #ifdef TARGET_NR_getgid
11166     case TARGET_NR_getgid:
11167         return get_errno(high2lowgid(getgid()));
11168 #endif
11169 #ifdef TARGET_NR_geteuid
11170     case TARGET_NR_geteuid:
11171         return get_errno(high2lowuid(geteuid()));
11172 #endif
11173 #ifdef TARGET_NR_getegid
11174     case TARGET_NR_getegid:
11175         return get_errno(high2lowgid(getegid()));
11176 #endif
11177     case TARGET_NR_setreuid:
11178         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11179     case TARGET_NR_setregid:
11180         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11181     case TARGET_NR_getgroups:
11182         {
11183             int gidsetsize = arg1;
11184             target_id *target_grouplist;
11185             gid_t *grouplist;
11186             int i;
11187 
11188             grouplist = alloca(gidsetsize * sizeof(gid_t));
11189             ret = get_errno(getgroups(gidsetsize, grouplist));
11190             if (gidsetsize == 0)
11191                 return ret;
11192             if (!is_error(ret)) {
11193                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11194                 if (!target_grouplist)
11195                     return -TARGET_EFAULT;
11196                 for (i = 0; i < ret; i++)
11197                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11198                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11199             }
11200         }
11201         return ret;
11202     case TARGET_NR_setgroups:
11203         {
11204             int gidsetsize = arg1;
11205             target_id *target_grouplist;
11206             gid_t *grouplist = NULL;
11207             int i;
11208             if (gidsetsize) {
11209                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11210                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11211                 if (!target_grouplist) {
11212                     return -TARGET_EFAULT;
11213                 }
11214                 for (i = 0; i < gidsetsize; i++) {
11215                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11216                 }
11217                 unlock_user(target_grouplist, arg2, 0);
11218             }
11219             return get_errno(setgroups(gidsetsize, grouplist));
11220         }
11221     case TARGET_NR_fchown:
11222         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11223 #if defined(TARGET_NR_fchownat)
11224     case TARGET_NR_fchownat:
11225         if (!(p = lock_user_string(arg2)))
11226             return -TARGET_EFAULT;
11227         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11228                                  low2highgid(arg4), arg5));
11229         unlock_user(p, arg2, 0);
11230         return ret;
11231 #endif
11232 #ifdef TARGET_NR_setresuid
11233     case TARGET_NR_setresuid:
11234         return get_errno(sys_setresuid(low2highuid(arg1),
11235                                        low2highuid(arg2),
11236                                        low2highuid(arg3)));
11237 #endif
11238 #ifdef TARGET_NR_getresuid
11239     case TARGET_NR_getresuid:
11240         {
11241             uid_t ruid, euid, suid;
11242             ret = get_errno(getresuid(&ruid, &euid, &suid));
11243             if (!is_error(ret)) {
11244                 if (put_user_id(high2lowuid(ruid), arg1)
11245                     || put_user_id(high2lowuid(euid), arg2)
11246                     || put_user_id(high2lowuid(suid), arg3))
11247                     return -TARGET_EFAULT;
11248             }
11249         }
11250         return ret;
11251 #endif
11252 #ifdef TARGET_NR_getresgid
11253     case TARGET_NR_setresgid:
11254         return get_errno(sys_setresgid(low2highgid(arg1),
11255                                        low2highgid(arg2),
11256                                        low2highgid(arg3)));
11257 #endif
11258 #ifdef TARGET_NR_getresgid
11259     case TARGET_NR_getresgid:
11260         {
11261             gid_t rgid, egid, sgid;
11262             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11263             if (!is_error(ret)) {
11264                 if (put_user_id(high2lowgid(rgid), arg1)
11265                     || put_user_id(high2lowgid(egid), arg2)
11266                     || put_user_id(high2lowgid(sgid), arg3))
11267                     return -TARGET_EFAULT;
11268             }
11269         }
11270         return ret;
11271 #endif
11272 #ifdef TARGET_NR_chown
11273     case TARGET_NR_chown:
11274         if (!(p = lock_user_string(arg1)))
11275             return -TARGET_EFAULT;
11276         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11277         unlock_user(p, arg1, 0);
11278         return ret;
11279 #endif
11280     case TARGET_NR_setuid:
11281         return get_errno(sys_setuid(low2highuid(arg1)));
11282     case TARGET_NR_setgid:
11283         return get_errno(sys_setgid(low2highgid(arg1)));
11284     case TARGET_NR_setfsuid:
11285         return get_errno(setfsuid(arg1));
11286     case TARGET_NR_setfsgid:
11287         return get_errno(setfsgid(arg1));
11288 
11289 #ifdef TARGET_NR_lchown32
11290     case TARGET_NR_lchown32:
11291         if (!(p = lock_user_string(arg1)))
11292             return -TARGET_EFAULT;
11293         ret = get_errno(lchown(p, arg2, arg3));
11294         unlock_user(p, arg1, 0);
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_getuid32
11298     case TARGET_NR_getuid32:
11299         return get_errno(getuid());
11300 #endif
11301 
11302 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11303    /* Alpha specific */
11304     case TARGET_NR_getxuid:
11305          {
11306         {
11307             uid_t euid;
11308             euid = geteuid();
11309             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11310         }
11311 #endif
11312 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11313    /* Alpha specific */
11314     case TARGET_NR_getxgid:
11315         {
11316             gid_t egid;
11317             egid = getegid();
11318             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11319         }
11320         return get_errno(getgid());
11321 #endif
11322 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11323     /* Alpha specific */
11324     case TARGET_NR_osf_getsysinfo:
11325         ret = -TARGET_EOPNOTSUPP;
11326         switch (arg1) {
11327           case TARGET_GSI_IEEE_FP_CONTROL:
11328             {
11329                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11330                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11331 
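                      /*
                       * Fold the current FPCR status bits into the swcr value
                       * reported to the guest.
                       */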
11332                 swcr &= ~SWCR_STATUS_MASK;
11333                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11334 
11335                 if (put_user_u64 (swcr, arg2))
11336                         return -TARGET_EFAULT;
11337                 ret = 0;
11338             }
11339             break;
11340 
11341           /* case GSI_IEEE_STATE_AT_SIGNAL:
11342              -- Not implemented in linux kernel.
11343              case GSI_UACPROC:
11344              -- Retrieves current unaligned access state; not much used.
11345              case GSI_PROC_TYPE:
11346              -- Retrieves implver information; surely not used.
11347              case GSI_GET_HWRPB:
11348              -- Grabs a copy of the HWRPB; surely not used.
11349           */
11350         }
11351         return ret;
11352 #endif
11353 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11354     /* Alpha specific */
11355     case TARGET_NR_osf_setsysinfo:
11356         ret = -TARGET_EOPNOTSUPP;
11357         switch (arg1) {
11358           case TARGET_SSI_IEEE_FP_CONTROL:
11359             {
11360                 uint64_t swcr, fpcr;
11361 
11362                 if (get_user_u64 (swcr, arg2)) {
11363                     return -TARGET_EFAULT;
11364                 }
11365 
11366                 /*
11367                  * The kernel calls swcr_update_status to update the
11368                  * status bits from the fpcr at every point that it
11369                  * could be queried.  Therefore, we store the status
11370                  * bits only in FPCR.
11371                  */
11372                 ((CPUAlphaState *)cpu_env)->swcr
11373                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11374 
11375                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11376                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11377                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11378                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11379                 ret = 0;
11380             }
11381             break;
11382 
11383           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11384             {
11385                 uint64_t exc, fpcr, fex;
11386 
11387                 if (get_user_u64(exc, arg2)) {
11388                     return -TARGET_EFAULT;
11389                 }
11390                 exc &= SWCR_STATUS_MASK;
11391                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11392 
11393                 /* Old exceptions are not signaled.  */
11394                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11395                 fex = exc & ~fex;
11396                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11397                 fex &= ((CPUArchState *)cpu_env)->swcr;
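                      /*
                       * fex now holds the newly raised exceptions whose traps
                       * are enabled in the guest's swcr.
                       */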
11398 
11399                 /* Update the hardware fpcr.  */
11400                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11401                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11402 
11403                 if (fex) {
11404                     int si_code = TARGET_FPE_FLTUNK;
11405                     target_siginfo_t info;
11406 
11407                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11408                         si_code = TARGET_FPE_FLTUND;
11409                     }
11410                     if (fex & SWCR_TRAP_ENABLE_INE) {
11411                         si_code = TARGET_FPE_FLTRES;
11412                     }
11413                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11414                         si_code = TARGET_FPE_FLTUND;
11415                     }
11416                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11417                         si_code = TARGET_FPE_FLTOVF;
11418                     }
11419                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11420                         si_code = TARGET_FPE_FLTDIV;
11421                     }
11422                     if (fex & SWCR_TRAP_ENABLE_INV) {
11423                         si_code = TARGET_FPE_FLTINV;
11424                     }
11425 
11426                     info.si_signo = SIGFPE;
11427                     info.si_errno = 0;
11428                     info.si_code = si_code;
11429                     info._sifields._sigfault._addr
11430                         = ((CPUArchState *)cpu_env)->pc;
11431                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11432                                  QEMU_SI_FAULT, &info);
11433                 }
11434                 ret = 0;
11435             }
11436             break;
11437 
11438           /* case SSI_NVPAIRS:
11439              -- Used with SSIN_UACPROC to enable unaligned accesses.
11440              case SSI_IEEE_STATE_AT_SIGNAL:
11441              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11442              -- Not implemented in linux kernel
11443           */
11444         }
11445         return ret;
11446 #endif
11447 #ifdef TARGET_NR_osf_sigprocmask
11448     /* Alpha specific.  */
11449     case TARGET_NR_osf_sigprocmask:
11450         {
11451             abi_ulong mask;
11452             int how;
11453             sigset_t set, oldset;
11454 
11455             switch(arg1) {
11456             case TARGET_SIG_BLOCK:
11457                 how = SIG_BLOCK;
11458                 break;
11459             case TARGET_SIG_UNBLOCK:
11460                 how = SIG_UNBLOCK;
11461                 break;
11462             case TARGET_SIG_SETMASK:
11463                 how = SIG_SETMASK;
11464                 break;
11465             default:
11466                 return -TARGET_EINVAL;
11467             }
11468             mask = arg2;
11469             target_to_host_old_sigset(&set, &mask);
11470             ret = do_sigprocmask(how, &set, &oldset);
11471             if (!ret) {
11472                 host_to_target_old_sigset(&mask, &oldset);
11473                 ret = mask;
11474             }
11475         }
11476         return ret;
11477 #endif
11478 
11479 #ifdef TARGET_NR_getgid32
11480     case TARGET_NR_getgid32:
11481         return get_errno(getgid());
11482 #endif
11483 #ifdef TARGET_NR_geteuid32
11484     case TARGET_NR_geteuid32:
11485         return get_errno(geteuid());
11486 #endif
11487 #ifdef TARGET_NR_getegid32
11488     case TARGET_NR_getegid32:
11489         return get_errno(getegid());
11490 #endif
11491 #ifdef TARGET_NR_setreuid32
11492     case TARGET_NR_setreuid32:
11493         return get_errno(setreuid(arg1, arg2));
11494 #endif
11495 #ifdef TARGET_NR_setregid32
11496     case TARGET_NR_setregid32:
11497         return get_errno(setregid(arg1, arg2));
11498 #endif
11499 #ifdef TARGET_NR_getgroups32
11500     case TARGET_NR_getgroups32:
11501         {
11502             int gidsetsize = arg1;
11503             uint32_t *target_grouplist;
11504             gid_t *grouplist;
11505             int i;
11506 
11507             grouplist = alloca(gidsetsize * sizeof(gid_t));
11508             ret = get_errno(getgroups(gidsetsize, grouplist));
11509             if (gidsetsize == 0)
11510                 return ret;
11511             if (!is_error(ret)) {
11512                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11513                 if (!target_grouplist) {
11514                     return -TARGET_EFAULT;
11515                 }
11516                 for (i = 0; i < ret; i++)
11517                     target_grouplist[i] = tswap32(grouplist[i]);
11518                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11519             }
11520         }
11521         return ret;
11522 #endif
11523 #ifdef TARGET_NR_setgroups32
11524     case TARGET_NR_setgroups32:
11525         {
11526             int gidsetsize = arg1;
11527             uint32_t *target_grouplist;
11528             gid_t *grouplist;
11529             int i;
11530 
11531             grouplist = alloca(gidsetsize * sizeof(gid_t));
11532             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11533             if (!target_grouplist) {
11534                 return -TARGET_EFAULT;
11535             }
11536             for (i = 0; i < gidsetsize; i++)
11537                 grouplist[i] = tswap32(target_grouplist[i]);
11538             unlock_user(target_grouplist, arg2, 0);
11539             return get_errno(setgroups(gidsetsize, grouplist));
11540         }
11541 #endif
11542 #ifdef TARGET_NR_fchown32
11543     case TARGET_NR_fchown32:
11544         return get_errno(fchown(arg1, arg2, arg3));
11545 #endif
11546 #ifdef TARGET_NR_setresuid32
11547     case TARGET_NR_setresuid32:
11548         return get_errno(sys_setresuid(arg1, arg2, arg3));
11549 #endif
11550 #ifdef TARGET_NR_getresuid32
11551     case TARGET_NR_getresuid32:
11552         {
11553             uid_t ruid, euid, suid;
11554             ret = get_errno(getresuid(&ruid, &euid, &suid));
11555             if (!is_error(ret)) {
11556                 if (put_user_u32(ruid, arg1)
11557                     || put_user_u32(euid, arg2)
11558                     || put_user_u32(suid, arg3))
11559                     return -TARGET_EFAULT;
11560             }
11561         }
11562         return ret;
11563 #endif
11564 #ifdef TARGET_NR_setresgid32
11565     case TARGET_NR_setresgid32:
11566         return get_errno(sys_setresgid(arg1, arg2, arg3));
11567 #endif
11568 #ifdef TARGET_NR_getresgid32
11569     case TARGET_NR_getresgid32:
11570         {
11571             gid_t rgid, egid, sgid;
11572             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11573             if (!is_error(ret)) {
11574                 if (put_user_u32(rgid, arg1)
11575                     || put_user_u32(egid, arg2)
11576                     || put_user_u32(sgid, arg3))
11577                     return -TARGET_EFAULT;
11578             }
11579         }
11580         return ret;
11581 #endif
11582 #ifdef TARGET_NR_chown32
11583     case TARGET_NR_chown32:
11584         if (!(p = lock_user_string(arg1)))
11585             return -TARGET_EFAULT;
11586         ret = get_errno(chown(p, arg2, arg3));
11587         unlock_user(p, arg1, 0);
11588         return ret;
11589 #endif
11590 #ifdef TARGET_NR_setuid32
11591     case TARGET_NR_setuid32:
11592         return get_errno(sys_setuid(arg1));
11593 #endif
11594 #ifdef TARGET_NR_setgid32
11595     case TARGET_NR_setgid32:
11596         return get_errno(sys_setgid(arg1));
11597 #endif
11598 #ifdef TARGET_NR_setfsuid32
11599     case TARGET_NR_setfsuid32:
11600         return get_errno(setfsuid(arg1));
11601 #endif
11602 #ifdef TARGET_NR_setfsgid32
11603     case TARGET_NR_setfsgid32:
11604         return get_errno(setfsgid(arg1));
11605 #endif
11606 #ifdef TARGET_NR_mincore
11607     case TARGET_NR_mincore:
11608         {
11609             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11610             if (!a) {
11611                 return -TARGET_ENOMEM;
11612             }
11613             p = lock_user_string(arg3);
11614             if (!p) {
11615                 ret = -TARGET_EFAULT;
11616             } else {
11617                 ret = get_errno(mincore(a, arg2, p));
11618                 unlock_user(p, arg3, ret);
11619             }
11620             unlock_user(a, arg1, 0);
11621         }
11622         return ret;
11623 #endif
11624 #ifdef TARGET_NR_arm_fadvise64_64
11625     case TARGET_NR_arm_fadvise64_64:
11626         /* arm_fadvise64_64 looks like fadvise64_64 but
11627          * with different argument order: fd, advice, offset, len
11628          * rather than the usual fd, offset, len, advice.
11629          * Note that offset and len are both 64-bit so appear as
11630          * pairs of 32-bit registers.
11631          */
11632         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11633                             target_offset64(arg5, arg6), arg2);
11634         return -host_to_target_errno(ret);
11635 #endif
11636 
11637 #if TARGET_ABI_BITS == 32
11638 
11639 #ifdef TARGET_NR_fadvise64_64
11640     case TARGET_NR_fadvise64_64:
11641 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11642         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11643         ret = arg2;
11644         arg2 = arg3;
11645         arg3 = arg4;
11646         arg4 = arg5;
11647         arg5 = arg6;
11648         arg6 = ret;
11649 #else
11650         /* 6 args: fd, offset (high, low), len (high, low), advice */
11651         if (regpairs_aligned(cpu_env, num)) {
11652             /* offset is in (3,4), len in (5,6) and advice in 7 */
11653             arg2 = arg3;
11654             arg3 = arg4;
11655             arg4 = arg5;
11656             arg5 = arg6;
11657             arg6 = arg7;
11658         }
11659 #endif
11660         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11661                             target_offset64(arg4, arg5), arg6);
11662         return -host_to_target_errno(ret);
11663 #endif
11664 
11665 #ifdef TARGET_NR_fadvise64
11666     case TARGET_NR_fadvise64:
11667         /* 5 args: fd, offset (high, low), len, advice */
11668         if (regpairs_aligned(cpu_env, num)) {
11669             /* offset is in (3,4), len in 5 and advice in 6 */
11670             arg2 = arg3;
11671             arg3 = arg4;
11672             arg4 = arg5;
11673             arg5 = arg6;
11674         }
11675         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11676         return -host_to_target_errno(ret);
11677 #endif
11678 
11679 #else /* not a 32-bit ABI */
11680 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11681 #ifdef TARGET_NR_fadvise64_64
11682     case TARGET_NR_fadvise64_64:
11683 #endif
11684 #ifdef TARGET_NR_fadvise64
11685     case TARGET_NR_fadvise64:
11686 #endif
11687 #ifdef TARGET_S390X
11688         switch (arg4) {
11689         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11690         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11691         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11692         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11693         default: break;
11694         }
11695 #endif
11696         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11697 #endif
11698 #endif /* end of 64-bit ABI fadvise handling */
11699 
11700 #ifdef TARGET_NR_madvise
11701     case TARGET_NR_madvise:
11702         /* A straight passthrough may not be safe because qemu sometimes
11703            turns private file-backed mappings into anonymous mappings.
11704            This will break MADV_DONTNEED.
11705            This is a hint, so ignoring and returning success is ok.  */
11706         return 0;
11707 #endif
11708 #ifdef TARGET_NR_fcntl64
11709     case TARGET_NR_fcntl64:
11710     {
11711         int cmd;
11712         struct flock64 fl;
11713         from_flock64_fn *copyfrom = copy_from_user_flock64;
11714         to_flock64_fn *copyto = copy_to_user_flock64;
11715 
11716 #ifdef TARGET_ARM
11717         if (!((CPUARMState *)cpu_env)->eabi) {
11718             copyfrom = copy_from_user_oabi_flock64;
11719             copyto = copy_to_user_oabi_flock64;
11720         }
11721 #endif
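              /*
               * The old ARM OABI aligns 64-bit members to 4 bytes, so its
               * struct flock64 layout differs from the EABI one and needs
               * separate copy helpers.
               */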
11722 
11723         cmd = target_to_host_fcntl_cmd(arg2);
11724         if (cmd == -TARGET_EINVAL) {
11725             return cmd;
11726         }
11727 
11728         switch(arg2) {
11729         case TARGET_F_GETLK64:
11730             ret = copyfrom(&fl, arg3);
11731             if (ret) {
11732                 break;
11733             }
11734             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11735             if (ret == 0) {
11736                 ret = copyto(arg3, &fl);
11737             }
11738             break;
11739 
11740         case TARGET_F_SETLK64:
11741         case TARGET_F_SETLKW64:
11742             ret = copyfrom(&fl, arg3);
11743             if (ret) {
11744                 break;
11745             }
11746             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11747             break;
11748         default:
11749             ret = do_fcntl(arg1, arg2, arg3);
11750             break;
11751         }
11752         return ret;
11753     }
11754 #endif
11755 #ifdef TARGET_NR_cacheflush
11756     case TARGET_NR_cacheflush:
11757         /* self-modifying code is handled automatically, so nothing needed */
11758         return 0;
11759 #endif
11760 #ifdef TARGET_NR_getpagesize
11761     case TARGET_NR_getpagesize:
11762         return TARGET_PAGE_SIZE;
11763 #endif
11764     case TARGET_NR_gettid:
11765         return get_errno(sys_gettid());
11766 #ifdef TARGET_NR_readahead
11767     case TARGET_NR_readahead:
11768 #if TARGET_ABI_BITS == 32
11769         if (regpairs_aligned(cpu_env, num)) {
11770             arg2 = arg3;
11771             arg3 = arg4;
11772             arg4 = arg5;
11773         }
11774         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11775 #else
11776         ret = get_errno(readahead(arg1, arg2, arg3));
11777 #endif
11778         return ret;
11779 #endif
11780 #ifdef CONFIG_ATTR
11781 #ifdef TARGET_NR_setxattr
11782     case TARGET_NR_listxattr:
11783     case TARGET_NR_llistxattr:
11784     {
11785         void *p, *b = 0;
11786         if (arg2) {
11787             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11788             if (!b) {
11789                 return -TARGET_EFAULT;
11790             }
11791         }
11792         p = lock_user_string(arg1);
11793         if (p) {
11794             if (num == TARGET_NR_listxattr) {
11795                 ret = get_errno(listxattr(p, b, arg3));
11796             } else {
11797                 ret = get_errno(llistxattr(p, b, arg3));
11798             }
11799         } else {
11800             ret = -TARGET_EFAULT;
11801         }
11802         unlock_user(p, arg1, 0);
11803         unlock_user(b, arg2, arg3);
11804         return ret;
11805     }
11806     case TARGET_NR_flistxattr:
11807     {
11808         void *b = 0;
11809         if (arg2) {
11810             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11811             if (!b) {
11812                 return -TARGET_EFAULT;
11813             }
11814         }
11815         ret = get_errno(flistxattr(arg1, b, arg3));
11816         unlock_user(b, arg2, arg3);
11817         return ret;
11818     }
11819     case TARGET_NR_setxattr:
11820     case TARGET_NR_lsetxattr:
11821         {
11822             void *p, *n, *v = 0;
11823             if (arg3) {
11824                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11825                 if (!v) {
11826                     return -TARGET_EFAULT;
11827                 }
11828             }
11829             p = lock_user_string(arg1);
11830             n = lock_user_string(arg2);
11831             if (p && n) {
11832                 if (num == TARGET_NR_setxattr) {
11833                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11834                 } else {
11835                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11836                 }
11837             } else {
11838                 ret = -TARGET_EFAULT;
11839             }
11840             unlock_user(p, arg1, 0);
11841             unlock_user(n, arg2, 0);
11842             unlock_user(v, arg3, 0);
11843         }
11844         return ret;
11845     case TARGET_NR_fsetxattr:
11846         {
11847             void *n, *v = 0;
11848             if (arg3) {
11849                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11850                 if (!v) {
11851                     return -TARGET_EFAULT;
11852                 }
11853             }
11854             n = lock_user_string(arg2);
11855             if (n) {
11856                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11857             } else {
11858                 ret = -TARGET_EFAULT;
11859             }
11860             unlock_user(n, arg2, 0);
11861             unlock_user(v, arg3, 0);
11862         }
11863         return ret;
11864     case TARGET_NR_getxattr:
11865     case TARGET_NR_lgetxattr:
11866         {
11867             void *p, *n, *v = 0;
11868             if (arg3) {
11869                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11870                 if (!v) {
11871                     return -TARGET_EFAULT;
11872                 }
11873             }
11874             p = lock_user_string(arg1);
11875             n = lock_user_string(arg2);
11876             if (p && n) {
11877                 if (num == TARGET_NR_getxattr) {
11878                     ret = get_errno(getxattr(p, n, v, arg4));
11879                 } else {
11880                     ret = get_errno(lgetxattr(p, n, v, arg4));
11881                 }
11882             } else {
11883                 ret = -TARGET_EFAULT;
11884             }
11885             unlock_user(p, arg1, 0);
11886             unlock_user(n, arg2, 0);
11887             unlock_user(v, arg3, arg4);
11888         }
11889         return ret;
11890     case TARGET_NR_fgetxattr:
11891         {
11892             void *n, *v = 0;
11893             if (arg3) {
11894                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11895                 if (!v) {
11896                     return -TARGET_EFAULT;
11897                 }
11898             }
11899             n = lock_user_string(arg2);
11900             if (n) {
11901                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11902             } else {
11903                 ret = -TARGET_EFAULT;
11904             }
11905             unlock_user(n, arg2, 0);
11906             unlock_user(v, arg3, arg4);
11907         }
11908         return ret;
11909     case TARGET_NR_removexattr:
11910     case TARGET_NR_lremovexattr:
11911         {
11912             void *p, *n;
11913             p = lock_user_string(arg1);
11914             n = lock_user_string(arg2);
11915             if (p && n) {
11916                 if (num == TARGET_NR_removexattr) {
11917                     ret = get_errno(removexattr(p, n));
11918                 } else {
11919                     ret = get_errno(lremovexattr(p, n));
11920                 }
11921             } else {
11922                 ret = -TARGET_EFAULT;
11923             }
11924             unlock_user(p, arg1, 0);
11925             unlock_user(n, arg2, 0);
11926         }
11927         return ret;
11928     case TARGET_NR_fremovexattr:
11929         {
11930             void *n;
11931             n = lock_user_string(arg2);
11932             if (n) {
11933                 ret = get_errno(fremovexattr(arg1, n));
11934             } else {
11935                 ret = -TARGET_EFAULT;
11936             }
11937             unlock_user(n, arg2, 0);
11938         }
11939         return ret;
11940 #endif
11941 #endif /* CONFIG_ATTR */
11942 #ifdef TARGET_NR_set_thread_area
11943     case TARGET_NR_set_thread_area:
11944 #if defined(TARGET_MIPS)
11945       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11946       return 0;
11947 #elif defined(TARGET_CRIS)
11948       if (arg1 & 0xff) {
11949           ret = -TARGET_EINVAL;
11950       } else {
11951           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11952           ret = 0;
11953       }
11954       return ret;
11955 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11956       return do_set_thread_area(cpu_env, arg1);
11957 #elif defined(TARGET_M68K)
11958       {
11959           TaskState *ts = cpu->opaque;
11960           ts->tp_value = arg1;
11961           return 0;
11962       }
11963 #else
11964       return -TARGET_ENOSYS;
11965 #endif
11966 #endif
11967 #ifdef TARGET_NR_get_thread_area
11968     case TARGET_NR_get_thread_area:
11969 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11970         return do_get_thread_area(cpu_env, arg1);
11971 #elif defined(TARGET_M68K)
11972         {
11973             TaskState *ts = cpu->opaque;
11974             return ts->tp_value;
11975         }
11976 #else
11977         return -TARGET_ENOSYS;
11978 #endif
11979 #endif
11980 #ifdef TARGET_NR_getdomainname
11981     case TARGET_NR_getdomainname:
11982         return -TARGET_ENOSYS;
11983 #endif
11984 
11985 #ifdef TARGET_NR_clock_settime
11986     case TARGET_NR_clock_settime:
11987     {
11988         struct timespec ts;
11989 
11990         ret = target_to_host_timespec(&ts, arg2);
11991         if (!is_error(ret)) {
11992             ret = get_errno(clock_settime(arg1, &ts));
11993         }
11994         return ret;
11995     }
11996 #endif
11997 #ifdef TARGET_NR_clock_settime64
11998     case TARGET_NR_clock_settime64:
11999     {
12000         struct timespec ts;
12001 
12002         ret = target_to_host_timespec64(&ts, arg2);
12003         if (!is_error(ret)) {
12004             ret = get_errno(clock_settime(arg1, &ts));
12005         }
12006         return ret;
12007     }
12008 #endif
12009 #ifdef TARGET_NR_clock_gettime
12010     case TARGET_NR_clock_gettime:
12011     {
12012         struct timespec ts;
12013         ret = get_errno(clock_gettime(arg1, &ts));
12014         if (!is_error(ret)) {
12015             ret = host_to_target_timespec(arg2, &ts);
12016         }
12017         return ret;
12018     }
12019 #endif
12020 #ifdef TARGET_NR_clock_gettime64
12021     case TARGET_NR_clock_gettime64:
12022     {
12023         struct timespec ts;
12024         ret = get_errno(clock_gettime(arg1, &ts));
12025         if (!is_error(ret)) {
12026             ret = host_to_target_timespec64(arg2, &ts);
12027         }
12028         return ret;
12029     }
12030 #endif
12031 #ifdef TARGET_NR_clock_getres
12032     case TARGET_NR_clock_getres:
12033     {
12034         struct timespec ts;
12035         ret = get_errno(clock_getres(arg1, &ts));
12036         if (!is_error(ret)) {
12037             host_to_target_timespec(arg2, &ts);
12038         }
12039         return ret;
12040     }
12041 #endif
12042 #ifdef TARGET_NR_clock_getres_time64
12043     case TARGET_NR_clock_getres_time64:
12044     {
12045         struct timespec ts;
12046         ret = get_errno(clock_getres(arg1, &ts));
12047         if (!is_error(ret)) {
12048             host_to_target_timespec64(arg2, &ts);
12049         }
12050         return ret;
12051     }
12052 #endif
12053 #ifdef TARGET_NR_clock_nanosleep
12054     case TARGET_NR_clock_nanosleep:
12055     {
12056         struct timespec ts;
12057         if (target_to_host_timespec(&ts, arg3)) {
12058             return -TARGET_EFAULT;
12059         }
12060         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12061                                              &ts, arg4 ? &ts : NULL));
12062         /*
12063          * If the call is interrupted by a signal handler, it fails with
12064          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is not
12065          * TIMER_ABSTIME, the remaining unslept time is copied back to arg4.
12066          */
12067         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12068             host_to_target_timespec(arg4, &ts)) {
12069               return -TARGET_EFAULT;
12070         }
12071 
12072         return ret;
12073     }
12074 #endif
12075 #ifdef TARGET_NR_clock_nanosleep_time64
12076     case TARGET_NR_clock_nanosleep_time64:
12077     {
12078         struct timespec ts;
12079 
12080         if (target_to_host_timespec64(&ts, arg3)) {
12081             return -TARGET_EFAULT;
12082         }
12083 
12084         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12085                                              &ts, arg4 ? &ts : NULL));
12086 
12087         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12088             host_to_target_timespec64(arg4, &ts)) {
12089             return -TARGET_EFAULT;
12090         }
12091         return ret;
12092     }
12093 #endif
12094 
12095 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12096     case TARGET_NR_set_tid_address:
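              /*
               * Pass the host view of the guest address; the kernel clears
               * this TID word (and wakes any futex waiter on it) when the
               * thread exits.
               */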
12097         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12098 #endif
12099 
12100     case TARGET_NR_tkill:
12101         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12102 
12103     case TARGET_NR_tgkill:
12104         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12105                          target_to_host_signal(arg3)));
12106 
12107 #ifdef TARGET_NR_set_robust_list
12108     case TARGET_NR_set_robust_list:
12109     case TARGET_NR_get_robust_list:
12110         /* The ABI for supporting robust futexes has userspace pass
12111          * the kernel a pointer to a linked list which is updated by
12112          * userspace after the syscall; the list is walked by the kernel
12113          * when the thread exits. Since the linked list in QEMU guest
12114          * memory isn't a valid linked list for the host and we have
12115          * no way to reliably intercept the thread-death event, we can't
12116          * support these. Silently return ENOSYS so that guest userspace
12117          * falls back to a non-robust futex implementation (which should
12118          * be OK except in the corner case of the guest crashing while
12119          * holding a mutex that is shared with another process via
12120          * shared memory).
12121          */
12122         return -TARGET_ENOSYS;
12123 #endif
12124 
12125 #if defined(TARGET_NR_utimensat)
12126     case TARGET_NR_utimensat:
12127         {
12128             struct timespec *tsp, ts[2];
12129             if (!arg3) {
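                      /*
                       * A NULL times pointer means: set both timestamps to
                       * the current time.
                       */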
12130                 tsp = NULL;
12131             } else {
12132                 if (target_to_host_timespec(ts, arg3)) {
12133                     return -TARGET_EFAULT;
12134                 }
12135                 if (target_to_host_timespec(ts + 1, arg3 +
12136                                             sizeof(struct target_timespec))) {
12137                     return -TARGET_EFAULT;
12138                 }
12139                 tsp = ts;
12140             }
12141             if (!arg2) {
12142                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12143             } else {
12144                 if (!(p = lock_user_string(arg2))) {
12145                     return -TARGET_EFAULT;
12146                 }
12147                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12148                 unlock_user(p, arg2, 0);
12149             }
12150         }
12151         return ret;
12152 #endif
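    /*
     * Illustrative guest usage of the utimensat path above (plain POSIX;
     * UTIME_NOW/UTIME_OMIT come from <sys/stat.h>):
     *
     *     struct timespec times[2] = {
     *         { .tv_nsec = UTIME_NOW  },   // atime: set to "now"
     *         { .tv_nsec = UTIME_OMIT },   // mtime: leave unchanged
     *     };
     *     utimensat(AT_FDCWD, "somefile", times, 0);
     *
     * Both array elements are converted separately, which is why two
     * consecutive target timespec structures are read from arg3.
     */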
12153 #ifdef TARGET_NR_utimensat_time64
12154     case TARGET_NR_utimensat_time64:
12155         {
12156             struct timespec *tsp, ts[2];
12157             if (!arg3) {
12158                 tsp = NULL;
12159             } else {
12160                 if (target_to_host_timespec64(ts, arg3)) {
12161                     return -TARGET_EFAULT;
12162                 }
12163                 if (target_to_host_timespec64(ts + 1, arg3 +
12164                                      sizeof(struct target__kernel_timespec))) {
12165                     return -TARGET_EFAULT;
12166                 }
12167                 tsp = ts;
12168             }
12169             if (!arg2) {
12170                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12171             } else {
12172                 p = lock_user_string(arg2);
12173                 if (!p) {
12174                     return -TARGET_EFAULT;
12175                 }
12176                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12177                 unlock_user(p, arg2, 0);
12178             }
12179         }
12180         return ret;
12181 #endif
12182 #ifdef TARGET_NR_futex
12183     case TARGET_NR_futex:
12184         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12185 #endif
12186 #ifdef TARGET_NR_futex_time64
12187     case TARGET_NR_futex_time64:
12188         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12189 #endif
12190 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12191     case TARGET_NR_inotify_init:
12192         ret = get_errno(sys_inotify_init());
12193         if (ret >= 0) {
12194             fd_trans_register(ret, &target_inotify_trans);
12195         }
12196         return ret;
12197 #endif
12198 #ifdef CONFIG_INOTIFY1
12199 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12200     case TARGET_NR_inotify_init1:
12201         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12202                                           fcntl_flags_tbl)));
12203         if (ret >= 0) {
12204             fd_trans_register(ret, &target_inotify_trans);
12205         }
12206         return ret;
12207 #endif
12208 #endif
12209 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12210     case TARGET_NR_inotify_add_watch:
12211         p = lock_user_string(arg2);
12212         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12213         unlock_user(p, arg2, 0);
12214         return ret;
12215 #endif
12216 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12217     case TARGET_NR_inotify_rm_watch:
12218         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12219 #endif
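    /*
     * The fd_trans_register() calls above matter because an inotify
     * descriptor yields struct inotify_event records on read(), whose
     * wd/mask/cookie/len fields are produced in host byte order; the
     * registered translator converts those records for the guest on
     * subsequent reads, just as for the eventfd descriptors further down.
     */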
12220 
12221 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12222     case TARGET_NR_mq_open:
12223         {
12224             struct mq_attr posix_mq_attr;
12225             struct mq_attr *pposix_mq_attr;
12226             int host_flags;
12227 
12228             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12229             pposix_mq_attr = NULL;
12230             if (arg4) {
12231                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12232                     return -TARGET_EFAULT;
12233                 }
12234                 pposix_mq_attr = &posix_mq_attr;
12235             }
12236             p = lock_user_string(arg1 - 1);
12237             if (!p) {
12238                 return -TARGET_EFAULT;
12239             }
12240             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12241             unlock_user(p, arg1, 0);
12242         }
12243         return ret;
12244 
12245     case TARGET_NR_mq_unlink:
12246         p = lock_user_string(arg1 - 1);
12247         if (!p) {
12248             return -TARGET_EFAULT;
12249         }
12250         ret = get_errno(mq_unlink(p));
12251         unlock_user(p, arg1, 0);
12252         return ret;
12253 
12254 #ifdef TARGET_NR_mq_timedsend
12255     case TARGET_NR_mq_timedsend:
12256         {
12257             struct timespec ts;
12258 
12259             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12260             if (arg5 != 0) {
12261                 if (target_to_host_timespec(&ts, arg5)) {
12262                     return -TARGET_EFAULT;
12263                 }
12264                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12265                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12266                     return -TARGET_EFAULT;
12267                 }
12268             } else {
12269                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12270             }
12271             unlock_user(p, arg2, arg3);
12272         }
12273         return ret;
12274 #endif
12275 #ifdef TARGET_NR_mq_timedsend_time64
12276     case TARGET_NR_mq_timedsend_time64:
12277         {
12278             struct timespec ts;
12279 
12280             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12281             if (arg5 != 0) {
12282                 if (target_to_host_timespec64(&ts, arg5)) {
12283                     return -TARGET_EFAULT;
12284                 }
12285                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12286                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12287                     return -TARGET_EFAULT;
12288                 }
12289             } else {
12290                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12291             }
12292             unlock_user(p, arg2, arg3);
12293         }
12294         return ret;
12295 #endif
12296 
12297 #ifdef TARGET_NR_mq_timedreceive
12298     case TARGET_NR_mq_timedreceive:
12299         {
12300             struct timespec ts;
12301             unsigned int prio;
12302 
12303             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12304             if (arg5 != 0) {
12305                 if (target_to_host_timespec(&ts, arg5)) {
12306                     return -TARGET_EFAULT;
12307                 }
12308                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12309                                                      &prio, &ts));
12310                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12311                     return -TARGET_EFAULT;
12312                 }
12313             } else {
12314                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12315                                                      &prio, NULL));
12316             }
12317             unlock_user(p, arg2, arg3);
12318             if (arg4 != 0)
12319                 put_user_u32(prio, arg4);
12320         }
12321         return ret;
12322 #endif
12323 #ifdef TARGET_NR_mq_timedreceive_time64
12324     case TARGET_NR_mq_timedreceive_time64:
12325         {
12326             struct timespec ts;
12327             unsigned int prio;
12328 
12329             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12330             if (arg5 != 0) {
12331                 if (target_to_host_timespec64(&ts, arg5)) {
12332                     return -TARGET_EFAULT;
12333                 }
12334                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12335                                                      &prio, &ts));
12336                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12337                     return -TARGET_EFAULT;
12338                 }
12339             } else {
12340                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12341                                                      &prio, NULL));
12342             }
12343             unlock_user(p, arg2, arg3);
12344             if (arg4 != 0) {
12345                 put_user_u32(prio, arg4);
12346             }
12347         }
12348         return ret;
12349 #endif
12350 
12351     /* Not implemented for now... */
12352 /*     case TARGET_NR_mq_notify: */
12353 /*         break; */
12354 
12355     case TARGET_NR_mq_getsetattr:
12356         {
12357             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12358             ret = 0;
12359             if (arg2 != 0) {
12360                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12361                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12362                                            &posix_mq_attr_out));
12363             } else if (arg3 != 0) {
12364                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12365             }
12366             if (ret == 0 && arg3 != 0) {
12367                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12368             }
12369         }
12370         return ret;
12371 #endif
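    /*
     * Minimal guest-side use of the POSIX message queue calls emulated above
     * (standard <mqueue.h> API; nothing QEMU-specific assumed):
     *
     *     mqd_t q = mq_open("/demo", O_CREAT | O_WRONLY, 0600, NULL);
     *     if (q != (mqd_t)-1) {
     *         mq_send(q, "hi", 2, 0);   // libc routes this via mq_timedsend
     *         mq_close(q);
     *         mq_unlink("/demo");
     *     }
     *
     * When an absolute timeout is supplied (mq_timedsend/mq_timedreceive),
     * it is converted to a host timespec before the safe_* call is made.
     */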
12372 
12373 #ifdef CONFIG_SPLICE
12374 #ifdef TARGET_NR_tee
12375     case TARGET_NR_tee:
12376         {
12377             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12378         }
12379         return ret;
12380 #endif
12381 #ifdef TARGET_NR_splice
12382     case TARGET_NR_splice:
12383         {
12384             loff_t loff_in, loff_out;
12385             loff_t *ploff_in = NULL, *ploff_out = NULL;
12386             if (arg2) {
12387                 if (get_user_u64(loff_in, arg2)) {
12388                     return -TARGET_EFAULT;
12389                 }
12390                 ploff_in = &loff_in;
12391             }
12392             if (arg4) {
12393                 if (get_user_u64(loff_out, arg4)) {
12394                     return -TARGET_EFAULT;
12395                 }
12396                 ploff_out = &loff_out;
12397             }
12398             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12399             if (arg2) {
12400                 if (put_user_u64(loff_in, arg2)) {
12401                     return -TARGET_EFAULT;
12402                 }
12403             }
12404             if (arg4) {
12405                 if (put_user_u64(loff_out, arg4)) {
12406                     return -TARGET_EFAULT;
12407                 }
12408             }
12409         }
12410         return ret;
12411 #endif
12412 #ifdef TARGET_NR_vmsplice
12413     case TARGET_NR_vmsplice:
12414         {
12415             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12416             if (vec != NULL) {
12417                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12418                 unlock_iovec(vec, arg2, arg3, 0);
12419             } else {
12420                 ret = -host_to_target_errno(errno);
12421             }
12422         }
12423         return ret;
12424 #endif
12425 #endif /* CONFIG_SPLICE */
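    /*
     * Guest-side sketch of the splice family handled above, to show which
     * arguments carry offsets ("file_fd" stands for any open regular file
     * descriptor; the call itself is the standard Linux API):
     *
     *     int pfd[2];
     *     if (pipe(pfd) == 0) {
     *         off64_t off = 0;
     *         // move up to 4 KiB from the file into the pipe, updating off
     *         splice(file_fd, &off, pfd[1], NULL, 4096, SPLICE_F_MOVE);
     *     }
     *
     * Only the offset pointers (arg2/arg4) need guest<->host copying, which
     * is what the get_user_u64()/put_user_u64() pairs implement.
     */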
12426 #ifdef CONFIG_EVENTFD
12427 #if defined(TARGET_NR_eventfd)
12428     case TARGET_NR_eventfd:
12429         ret = get_errno(eventfd(arg1, 0));
12430         if (ret >= 0) {
12431             fd_trans_register(ret, &target_eventfd_trans);
12432         }
12433         return ret;
12434 #endif
12435 #if defined(TARGET_NR_eventfd2)
12436     case TARGET_NR_eventfd2:
12437     {
12438         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12439         if (arg2 & TARGET_O_NONBLOCK) {
12440             host_flags |= O_NONBLOCK;
12441         }
12442         if (arg2 & TARGET_O_CLOEXEC) {
12443             host_flags |= O_CLOEXEC;
12444         }
12445         ret = get_errno(eventfd(arg1, host_flags));
12446         if (ret >= 0) {
12447             fd_trans_register(ret, &target_eventfd_trans);
12448         }
12449         return ret;
12450     }
12451 #endif
12452 #endif /* CONFIG_EVENTFD  */
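    /*
     * The explicit O_NONBLOCK/O_CLOEXEC handling in eventfd2 above exists
     * because the numeric values of these flags are architecture-dependent
     * on the guest side (MIPS and Alpha, for instance, use different
     * O_NONBLOCK bits), so the target flag word cannot simply be passed
     * through to the host eventfd() call unchanged.
     */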
12453 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12454     case TARGET_NR_fallocate:
12455 #if TARGET_ABI_BITS == 32
12456         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12457                                   target_offset64(arg5, arg6)));
12458 #else
12459         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12460 #endif
12461         return ret;
12462 #endif
12463 #if defined(CONFIG_SYNC_FILE_RANGE)
12464 #if defined(TARGET_NR_sync_file_range)
12465     case TARGET_NR_sync_file_range:
12466 #if TARGET_ABI_BITS == 32
12467 #if defined(TARGET_MIPS)
12468         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12469                                         target_offset64(arg5, arg6), arg7));
12470 #else
12471         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12472                                         target_offset64(arg4, arg5), arg6));
12473 #endif /* !TARGET_MIPS */
12474 #else
12475         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12476 #endif
12477         return ret;
12478 #endif
12479 #if defined(TARGET_NR_sync_file_range2) || \
12480     defined(TARGET_NR_arm_sync_file_range)
12481 #if defined(TARGET_NR_sync_file_range2)
12482     case TARGET_NR_sync_file_range2:
12483 #endif
12484 #if defined(TARGET_NR_arm_sync_file_range)
12485     case TARGET_NR_arm_sync_file_range:
12486 #endif
12487         /* This is like sync_file_range but the arguments are reordered */
12488 #if TARGET_ABI_BITS == 32
12489         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12490                                         target_offset64(arg5, arg6), arg2));
12491 #else
12492         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12493 #endif
12494         return ret;
12495 #endif
12496 #endif
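    /*
     * On 32-bit ABIs the 64-bit offsets arrive split across two registers
     * and target_offset64() reassembles them. Register-pair alignment rules
     * differ per target, which is why MIPS picks the offset up one argument
     * later, and why ARM's sync_file_range2 variant moves the flags argument
     * ahead of the offsets in the first place.
     */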
12497 #if defined(TARGET_NR_signalfd4)
12498     case TARGET_NR_signalfd4:
12499         return do_signalfd4(arg1, arg2, arg4);
12500 #endif
12501 #if defined(TARGET_NR_signalfd)
12502     case TARGET_NR_signalfd:
12503         return do_signalfd4(arg1, arg2, 0);
12504 #endif
12505 #if defined(CONFIG_EPOLL)
12506 #if defined(TARGET_NR_epoll_create)
12507     case TARGET_NR_epoll_create:
12508         return get_errno(epoll_create(arg1));
12509 #endif
12510 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12511     case TARGET_NR_epoll_create1:
12512         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12513 #endif
12514 #if defined(TARGET_NR_epoll_ctl)
12515     case TARGET_NR_epoll_ctl:
12516     {
12517         struct epoll_event ep;
12518         struct epoll_event *epp = NULL;
12519         if (arg4) {
12520             if (arg2 != EPOLL_CTL_DEL) {
12521                 struct target_epoll_event *target_ep;
12522                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12523                     return -TARGET_EFAULT;
12524                 }
12525                 ep.events = tswap32(target_ep->events);
12526                 /*
12527                  * The epoll_data_t union is just opaque data to the kernel,
12528                  * so we transfer all 64 bits across and need not worry what
12529                  * actual data type it is.
12530                  */
12531                 ep.data.u64 = tswap64(target_ep->data.u64);
12532                 unlock_user_struct(target_ep, arg4, 0);
12533             }
12534             /*
12535              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12536              * non-null event pointer even though its contents are ignored,
12537              * so we always pass a valid pointer whenever arg4 is non-zero.
12538              */
12539             epp = &ep;
12540         }
12541         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12542     }
12543 #endif
12544 
12545 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12546 #if defined(TARGET_NR_epoll_wait)
12547     case TARGET_NR_epoll_wait:
12548 #endif
12549 #if defined(TARGET_NR_epoll_pwait)
12550     case TARGET_NR_epoll_pwait:
12551 #endif
12552     {
12553         struct target_epoll_event *target_ep;
12554         struct epoll_event *ep;
12555         int epfd = arg1;
12556         int maxevents = arg3;
12557         int timeout = arg4;
12558 
12559         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12560             return -TARGET_EINVAL;
12561         }
12562 
12563         target_ep = lock_user(VERIFY_WRITE, arg2,
12564                               maxevents * sizeof(struct target_epoll_event), 1);
12565         if (!target_ep) {
12566             return -TARGET_EFAULT;
12567         }
12568 
12569         ep = g_try_new(struct epoll_event, maxevents);
12570         if (!ep) {
12571             unlock_user(target_ep, arg2, 0);
12572             return -TARGET_ENOMEM;
12573         }
12574 
12575         switch (num) {
12576 #if defined(TARGET_NR_epoll_pwait)
12577         case TARGET_NR_epoll_pwait:
12578         {
12579             target_sigset_t *target_set;
12580             sigset_t _set, *set = &_set;
12581 
12582             if (arg5) {
12583                 if (arg6 != sizeof(target_sigset_t)) {
12584                     ret = -TARGET_EINVAL;
12585                     break;
12586                 }
12587 
12588                 target_set = lock_user(VERIFY_READ, arg5,
12589                                        sizeof(target_sigset_t), 1);
12590                 if (!target_set) {
12591                     ret = -TARGET_EFAULT;
12592                     break;
12593                 }
12594                 target_to_host_sigset(set, target_set);
12595                 unlock_user(target_set, arg5, 0);
12596             } else {
12597                 set = NULL;
12598             }
12599 
12600             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12601                                              set, SIGSET_T_SIZE));
12602             break;
12603         }
12604 #endif
12605 #if defined(TARGET_NR_epoll_wait)
12606         case TARGET_NR_epoll_wait:
12607             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12608                                              NULL, 0));
12609             break;
12610 #endif
12611         default:
12612             ret = -TARGET_ENOSYS;
12613         }
12614         if (!is_error(ret)) {
12615             int i;
12616             for (i = 0; i < ret; i++) {
12617                 target_ep[i].events = tswap32(ep[i].events);
12618                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12619             }
12620             unlock_user(target_ep, arg2,
12621                         ret * sizeof(struct target_epoll_event));
12622         } else {
12623             unlock_user(target_ep, arg2, 0);
12624         }
12625         g_free(ep);
12626         return ret;
12627     }
12628 #endif
12629 #endif
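    /*
     * The epoll paths above follow the usual pattern for array results: lock
     * the guest buffer for writing, allocate a bounded host-side array, and
     * only on success convert each event (a 32-bit mask swap plus the opaque
     * 64-bit data word) before unlocking with the number of bytes actually
     * produced.
     */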
12630 #ifdef TARGET_NR_prlimit64
12631     case TARGET_NR_prlimit64:
12632     {
12633         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12634         struct target_rlimit64 *target_rnew, *target_rold;
12635         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12636         int resource = target_to_host_resource(arg2);
12637 
12638         if (arg3 && (resource != RLIMIT_AS &&
12639                      resource != RLIMIT_DATA &&
12640                      resource != RLIMIT_STACK)) {
12641             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12642                 return -TARGET_EFAULT;
12643             }
12644             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12645             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12646             unlock_user_struct(target_rnew, arg3, 0);
12647             rnewp = &rnew;
12648         }
12649 
12650         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12651         if (!is_error(ret) && arg4) {
12652             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12653                 return -TARGET_EFAULT;
12654             }
12655             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12656             target_rold->rlim_max = tswap64(rold.rlim_max);
12657             unlock_user_struct(target_rold, arg4, 1);
12658         }
12659         return ret;
12660     }
12661 #endif
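    /*
     * New limits for RLIMIT_AS/DATA/STACK are deliberately not forwarded to
     * the host: they would also constrain the emulator process itself, which
     * generally needs far more address space and stack than the guest binary
     * expects, so for those resources only the old limits are read back.
     */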
12662 #ifdef TARGET_NR_gethostname
12663     case TARGET_NR_gethostname:
12664     {
12665         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12666         if (name) {
12667             ret = get_errno(gethostname(name, arg2));
12668             unlock_user(name, arg1, arg2);
12669         } else {
12670             ret = -TARGET_EFAULT;
12671         }
12672         return ret;
12673     }
12674 #endif
12675 #ifdef TARGET_NR_atomic_cmpxchg_32
12676     case TARGET_NR_atomic_cmpxchg_32:
12677     {
12678         /* should use start_exclusive from main.c */
12679         abi_ulong mem_value;
12680         if (get_user_u32(mem_value, arg6)) {
12681             target_siginfo_t info;
12682             info.si_signo = SIGSEGV;
12683             info.si_errno = 0;
12684             info.si_code = TARGET_SEGV_MAPERR;
12685             info._sifields._sigfault._addr = arg6;
12686             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12687                          QEMU_SI_FAULT, &info);
12688             ret = 0xdeadbeef;
12689 
12690         }
12691         if (mem_value == arg2)
12692             put_user_u32(arg1, arg6);
12693         return mem_value;
12694     }
12695 #endif
12696 #ifdef TARGET_NR_atomic_barrier
12697     case TARGET_NR_atomic_barrier:
12698         /* Like the kernel implementation and the QEMU ARM barrier
12699            handling, treat this as a no-op. */
12700         return 0;
12701 #endif
12702 
12703 #ifdef TARGET_NR_timer_create
12704     case TARGET_NR_timer_create:
12705     {
12706         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12707 
12708         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12709 
12710         int clkid = arg1;
12711         int timer_index = next_free_host_timer();
12712 
12713         if (timer_index < 0) {
12714             ret = -TARGET_EAGAIN;
12715         } else {
12716             timer_t *phtimer = g_posix_timers + timer_index;
12717 
12718             if (arg2) {
12719                 phost_sevp = &host_sevp;
12720                 ret = target_to_host_sigevent(phost_sevp, arg2);
12721                 if (ret != 0) {
12722                     return ret;
12723                 }
12724             }
12725 
12726             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12727             if (ret) {
12728                 phtimer = NULL;
12729             } else {
12730                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12731                     return -TARGET_EFAULT;
12732                 }
12733             }
12734         }
12735         return ret;
12736     }
12737 #endif
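    /*
     * The timer id handed back to the guest is not a host timer_t: the host
     * timer lives in g_posix_timers[] and the guest sees its index tagged
     * with TIMER_MAGIC. The timer_settime/gettime/getoverrun/delete cases
     * below undo that encoding with get_timer_id() before touching the host
     * timer.
     */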
12738 
12739 #ifdef TARGET_NR_timer_settime
12740     case TARGET_NR_timer_settime:
12741     {
12742         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12743          * struct itimerspec * old_value */
12744         target_timer_t timerid = get_timer_id(arg1);
12745 
12746         if (timerid < 0) {
12747             ret = timerid;
12748         } else if (arg3 == 0) {
12749             ret = -TARGET_EINVAL;
12750         } else {
12751             timer_t htimer = g_posix_timers[timerid];
12752             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12753 
12754             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12755                 return -TARGET_EFAULT;
12756             }
12757             ret = get_errno(
12758                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12759             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12760                 return -TARGET_EFAULT;
12761             }
12762         }
12763         return ret;
12764     }
12765 #endif
12766 
12767 #ifdef TARGET_NR_timer_settime64
12768     case TARGET_NR_timer_settime64:
12769     {
12770         target_timer_t timerid = get_timer_id(arg1);
12771 
12772         if (timerid < 0) {
12773             ret = timerid;
12774         } else if (arg3 == 0) {
12775             ret = -TARGET_EINVAL;
12776         } else {
12777             timer_t htimer = g_posix_timers[timerid];
12778             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12779 
12780             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12781                 return -TARGET_EFAULT;
12782             }
12783             ret = get_errno(
12784                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12785             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12786                 return -TARGET_EFAULT;
12787             }
12788         }
12789         return ret;
12790     }
12791 #endif
12792 
12793 #ifdef TARGET_NR_timer_gettime
12794     case TARGET_NR_timer_gettime:
12795     {
12796         /* args: timer_t timerid, struct itimerspec *curr_value */
12797         target_timer_t timerid = get_timer_id(arg1);
12798 
12799         if (timerid < 0) {
12800             ret = timerid;
12801         } else if (!arg2) {
12802             ret = -TARGET_EFAULT;
12803         } else {
12804             timer_t htimer = g_posix_timers[timerid];
12805             struct itimerspec hspec;
12806             ret = get_errno(timer_gettime(htimer, &hspec));
12807 
12808             if (host_to_target_itimerspec(arg2, &hspec)) {
12809                 ret = -TARGET_EFAULT;
12810             }
12811         }
12812         return ret;
12813     }
12814 #endif
12815 
12816 #ifdef TARGET_NR_timer_gettime64
12817     case TARGET_NR_timer_gettime64:
12818     {
12819         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12820         target_timer_t timerid = get_timer_id(arg1);
12821 
12822         if (timerid < 0) {
12823             ret = timerid;
12824         } else if (!arg2) {
12825             ret = -TARGET_EFAULT;
12826         } else {
12827             timer_t htimer = g_posix_timers[timerid];
12828             struct itimerspec hspec;
12829             ret = get_errno(timer_gettime(htimer, &hspec));
12830 
12831             if (host_to_target_itimerspec64(arg2, &hspec)) {
12832                 ret = -TARGET_EFAULT;
12833             }
12834         }
12835         return ret;
12836     }
12837 #endif
12838 
12839 #ifdef TARGET_NR_timer_getoverrun
12840     case TARGET_NR_timer_getoverrun:
12841     {
12842         /* args: timer_t timerid */
12843         target_timer_t timerid = get_timer_id(arg1);
12844 
12845         if (timerid < 0) {
12846             ret = timerid;
12847         } else {
12848             timer_t htimer = g_posix_timers[timerid];
12849             ret = get_errno(timer_getoverrun(htimer));
12850         }
12851         return ret;
12852     }
12853 #endif
12854 
12855 #ifdef TARGET_NR_timer_delete
12856     case TARGET_NR_timer_delete:
12857     {
12858         /* args: timer_t timerid */
12859         target_timer_t timerid = get_timer_id(arg1);
12860 
12861         if (timerid < 0) {
12862             ret = timerid;
12863         } else {
12864             timer_t htimer = g_posix_timers[timerid];
12865             ret = get_errno(timer_delete(htimer));
12866             g_posix_timers[timerid] = 0;
12867         }
12868         return ret;
12869     }
12870 #endif
12871 
12872 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12873     case TARGET_NR_timerfd_create:
12874         return get_errno(timerfd_create(arg1,
12875                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12876 #endif
12877 
12878 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12879     case TARGET_NR_timerfd_gettime:
12880         {
12881             struct itimerspec its_curr;
12882 
12883             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12884 
12885             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12886                 return -TARGET_EFAULT;
12887             }
12888         }
12889         return ret;
12890 #endif
12891 
12892 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12893     case TARGET_NR_timerfd_gettime64:
12894         {
12895             struct itimerspec its_curr;
12896 
12897             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12898 
12899             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12900                 return -TARGET_EFAULT;
12901             }
12902         }
12903         return ret;
12904 #endif
12905 
12906 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12907     case TARGET_NR_timerfd_settime:
12908         {
12909             struct itimerspec its_new, its_old, *p_new;
12910 
12911             if (arg3) {
12912                 if (target_to_host_itimerspec(&its_new, arg3)) {
12913                     return -TARGET_EFAULT;
12914                 }
12915                 p_new = &its_new;
12916             } else {
12917                 p_new = NULL;
12918             }
12919 
12920             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12921 
12922             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12923                 return -TARGET_EFAULT;
12924             }
12925         }
12926         return ret;
12927 #endif
12928 
12929 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12930     case TARGET_NR_timerfd_settime64:
12931         {
12932             struct itimerspec its_new, its_old, *p_new;
12933 
12934             if (arg3) {
12935                 if (target_to_host_itimerspec64(&its_new, arg3)) {
12936                     return -TARGET_EFAULT;
12937                 }
12938                 p_new = &its_new;
12939             } else {
12940                 p_new = NULL;
12941             }
12942 
12943             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12944 
12945             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
12946                 return -TARGET_EFAULT;
12947             }
12948         }
12949         return ret;
12950 #endif
12951 
12952 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12953     case TARGET_NR_ioprio_get:
12954         return get_errno(ioprio_get(arg1, arg2));
12955 #endif
12956 
12957 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12958     case TARGET_NR_ioprio_set:
12959         return get_errno(ioprio_set(arg1, arg2, arg3));
12960 #endif
12961 
12962 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12963     case TARGET_NR_setns:
12964         return get_errno(setns(arg1, arg2));
12965 #endif
12966 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12967     case TARGET_NR_unshare:
12968         return get_errno(unshare(arg1));
12969 #endif
12970 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12971     case TARGET_NR_kcmp:
12972         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12973 #endif
12974 #ifdef TARGET_NR_swapcontext
12975     case TARGET_NR_swapcontext:
12976         /* PowerPC specific.  */
12977         return do_swapcontext(cpu_env, arg1, arg2, arg3);
12978 #endif
12979 #ifdef TARGET_NR_memfd_create
12980     case TARGET_NR_memfd_create:
12981         p = lock_user_string(arg1);
12982         if (!p) {
12983             return -TARGET_EFAULT;
12984         }
12985         ret = get_errno(memfd_create(p, arg2));
12986         fd_trans_unregister(ret);
12987         unlock_user(p, arg1, 0);
12988         return ret;
12989 #endif
12990 #if defined TARGET_NR_membarrier && defined __NR_membarrier
12991     case TARGET_NR_membarrier:
12992         return get_errno(membarrier(arg1, arg2));
12993 #endif
12994 
12995 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
12996     case TARGET_NR_copy_file_range:
12997         {
12998             loff_t inoff, outoff;
12999             loff_t *pinoff = NULL, *poutoff = NULL;
13000 
13001             if (arg2) {
13002                 if (get_user_u64(inoff, arg2)) {
13003                     return -TARGET_EFAULT;
13004                 }
13005                 pinoff = &inoff;
13006             }
13007             if (arg4) {
13008                 if (get_user_u64(outoff, arg4)) {
13009                     return -TARGET_EFAULT;
13010                 }
13011                 poutoff = &outoff;
13012             }
13013             /* Do not sign-extend the count parameter. */
13014             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13015                                                  (abi_ulong)arg5, arg6));
13016             if (!is_error(ret) && ret > 0) {
13017                 if (arg2) {
13018                     if (put_user_u64(inoff, arg2)) {
13019                         return -TARGET_EFAULT;
13020                     }
13021                 }
13022                 if (arg4) {
13023                     if (put_user_u64(outoff, arg4)) {
13024                         return -TARGET_EFAULT;
13025                     }
13026                 }
13027             }
13028         }
13029         return ret;
13030 #endif
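    /*
     * The (abi_ulong) cast above is what the "do not sign-extend" comment is
     * about: on a 32-bit guest arg5 is a signed 32-bit abi_long, so a length
     * with its top bit set would otherwise be sign-extended when widened for
     * the host call. For a guest value of 0x80000000:
     *
     *     sign-extended:  0xffffffff80000000   (wrong: huge/negative length)
     *     zero-extended:  0x0000000080000000   (what the cast guarantees)
     */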
13031 
13032 #if defined(TARGET_NR_pivot_root)
13033     case TARGET_NR_pivot_root:
13034         {
13035             void *p2;
13036             p = lock_user_string(arg1); /* new_root */
13037             p2 = lock_user_string(arg2); /* put_old */
13038             if (!p || !p2) {
13039                 ret = -TARGET_EFAULT;
13040             } else {
13041                 ret = get_errno(pivot_root(p, p2));
13042             }
13043             unlock_user(p2, arg2, 0);
13044             unlock_user(p, arg1, 0);
13045         }
13046         return ret;
13047 #endif
13048 
13049     default:
13050         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13051         return -TARGET_ENOSYS;
13052     }
13053     return ret;
13054 }
13055 
13056 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13057                     abi_long arg2, abi_long arg3, abi_long arg4,
13058                     abi_long arg5, abi_long arg6, abi_long arg7,
13059                     abi_long arg8)
13060 {
13061     CPUState *cpu = env_cpu(cpu_env);
13062     abi_long ret;
13063 
13064 #ifdef DEBUG_ERESTARTSYS
13065     /* Debug-only code for exercising the syscall-restart code paths
13066      * in the per-architecture cpu main loops: restart every syscall
13067      * the guest makes once before letting it through.
13068      */
13069     {
13070         static bool flag;
13071         flag = !flag;
13072         if (flag) {
13073             return -QEMU_ERESTARTSYS;
13074         }
13075     }
13076 #endif
13077 
13078     record_syscall_start(cpu, num, arg1,
13079                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13080 
13081     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13082         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13083     }
13084 
13085     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13086                       arg5, arg6, arg7, arg8);
13087 
13088     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13089         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13090                           arg3, arg4, arg5, arg6);
13091     }
13092 
13093     record_syscall_return(cpu, num, ret);
13094     return ret;
13095 }
13096