xref: /openbmc/qemu/linux-user/syscall.c (revision 5423e6d3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "strace.h"
131 #include "signal-common.h"
132 #include "loader.h"
133 #include "user-mmap.h"
134 #include "qemu/guest-random.h"
135 #include "qemu/selfmap.h"
136 #include "user/syscall-trace.h"
137 #include "qapi/error.h"
138 #include "fd-trans.h"
139 #include "tcg/tcg.h"
140 
141 #ifndef CLONE_IO
142 #define CLONE_IO                0x80000000      /* Clone io context */
143 #endif
144 
145 /* We can't directly call the host clone syscall, because this will
146  * badly confuse libc (breaking mutexes, for example). So we must
147  * divide clone flags into:
148  *  * flag combinations that look like pthread_create()
149  *  * flag combinations that look like fork()
150  *  * flags we can implement within QEMU itself
151  *  * flags we can't support and will return an error for
152  */
153 /* For thread creation, all these flags must be present; for
154  * fork, none must be present.
155  */
156 #define CLONE_THREAD_FLAGS                              \
157     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
158      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
159 
160 /* These flags are ignored:
161  * CLONE_DETACHED is now ignored by the kernel;
162  * CLONE_IO is just an optimisation hint to the I/O scheduler
163  */
164 #define CLONE_IGNORED_FLAGS                     \
165     (CLONE_DETACHED | CLONE_IO)
166 
167 /* Flags for fork which we can implement within QEMU itself */
168 #define CLONE_OPTIONAL_FORK_FLAGS               \
169     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
170      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
171 
172 /* Flags for thread creation which we can implement within QEMU itself */
173 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
174     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
175      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
176 
177 #define CLONE_INVALID_FORK_FLAGS                                        \
178     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
179 
180 #define CLONE_INVALID_THREAD_FLAGS                                      \
181     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
182        CLONE_IGNORED_FLAGS))
183 
184 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
185  * have almost all been allocated. We cannot support any of
186  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
187  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
188  * The checks against the invalid thread masks above will catch these.
189  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
190  */
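/*
 * For reference: a typical glibc (NPTL) pthread_create() issues clone()
 * with CLONE_THREAD_FLAGS plus CLONE_SETTLS, CLONE_PARENT_SETTID and
 * CLONE_CHILD_CLEARTID, which is exactly the combination the checks
 * above accept on the thread-creation path, while fork()-style clones
 * typically carry little more than an exit signal (SIGCHLD) in the
 * CSIGNAL bits.
 */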
191 
192 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
193  * once. This exercises the codepaths for restart.
194  */
195 //#define DEBUG_ERESTARTSYS
196 
197 //#include <linux/msdos_fs.h>
198 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
199 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
200 
201 #undef _syscall0
202 #undef _syscall1
203 #undef _syscall2
204 #undef _syscall3
205 #undef _syscall4
206 #undef _syscall5
207 #undef _syscall6
208 
209 #define _syscall0(type,name)		\
210 static type name (void)			\
211 {					\
212 	return syscall(__NR_##name);	\
213 }
214 
215 #define _syscall1(type,name,type1,arg1)		\
216 static type name (type1 arg1)			\
217 {						\
218 	return syscall(__NR_##name, arg1);	\
219 }
220 
221 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
222 static type name (type1 arg1,type2 arg2)		\
223 {							\
224 	return syscall(__NR_##name, arg1, arg2);	\
225 }
226 
227 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
228 static type name (type1 arg1,type2 arg2,type3 arg3)		\
229 {								\
230 	return syscall(__NR_##name, arg1, arg2, arg3);		\
231 }
232 
233 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
234 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
235 {										\
236 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
237 }
238 
239 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
240 		  type5,arg5)							\
241 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
242 {										\
243 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
244 }
245 
246 
247 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
248 		  type5,arg5,type6,arg6)					\
249 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
250                   type6 arg6)							\
251 {										\
252 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
253 }
254 
255 
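/*
 * For illustration, the declaration
 *   _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * used further down expands to
 *   static int sys_getcwd1(char *buf, size_t size)
 *   {
 *       return syscall(__NR_sys_getcwd1, buf, size);
 *   }
 * with __NR_sys_getcwd1 aliased to the host's __NR_getcwd just below.
 */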
256 #define __NR_sys_uname __NR_uname
257 #define __NR_sys_getcwd1 __NR_getcwd
258 #define __NR_sys_getdents __NR_getdents
259 #define __NR_sys_getdents64 __NR_getdents64
260 #define __NR_sys_getpriority __NR_getpriority
261 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
262 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
263 #define __NR_sys_syslog __NR_syslog
264 #if defined(__NR_futex)
265 # define __NR_sys_futex __NR_futex
266 #endif
267 #if defined(__NR_futex_time64)
268 # define __NR_sys_futex_time64 __NR_futex_time64
269 #endif
270 #define __NR_sys_inotify_init __NR_inotify_init
271 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
272 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
273 #define __NR_sys_statx __NR_statx
274 
275 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
276 #define __NR__llseek __NR_lseek
277 #endif
278 
279 /* Newer kernel ports have llseek() instead of _llseek() */
280 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
281 #define TARGET_NR__llseek TARGET_NR_llseek
282 #endif
283 
284 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
285 #ifndef TARGET_O_NONBLOCK_MASK
286 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
287 #endif
288 
289 #define __NR_sys_gettid __NR_gettid
290 _syscall0(int, sys_gettid)
291 
292 /* For the 64-bit guest on 32-bit host case we must emulate
293  * getdents using getdents64, because otherwise the host
294  * might hand us back more dirent records than we can fit
295  * into the guest buffer after structure format conversion.
296  * Otherwise we emulate getdents with getdents if the host has it.
297  */
298 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
299 #define EMULATE_GETDENTS_WITH_GETDENTS
300 #endif
301 
302 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
303 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
304 #endif
305 #if (defined(TARGET_NR_getdents) && \
306       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
307     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
308 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
309 #endif
310 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
311 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
312           loff_t *, res, uint, wh);
313 #endif
314 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
315 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
316           siginfo_t *, uinfo)
317 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
318 #ifdef __NR_exit_group
319 _syscall1(int,exit_group,int,error_code)
320 #endif
321 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
322 _syscall1(int,set_tid_address,int *,tidptr)
323 #endif
324 #if defined(__NR_futex)
325 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #if defined(__NR_futex_time64)
329 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
330           const struct timespec *,timeout,int *,uaddr2,int,val3)
331 #endif
332 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
333 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
334           unsigned long *, user_mask_ptr);
335 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
336 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
337           unsigned long *, user_mask_ptr);
338 #define __NR_sys_getcpu __NR_getcpu
339 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
340 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
341           void *, arg);
342 _syscall2(int, capget, struct __user_cap_header_struct *, header,
343           struct __user_cap_data_struct *, data);
344 _syscall2(int, capset, struct __user_cap_header_struct *, header,
345           struct __user_cap_data_struct *, data);
346 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
347 _syscall2(int, ioprio_get, int, which, int, who)
348 #endif
349 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
350 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
351 #endif
352 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
353 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
354 #endif
355 
356 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
357 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
358           unsigned long, idx1, unsigned long, idx2)
359 #endif
360 
361 /*
362  * It is assumed that struct statx is architecture independent.
363  */
364 #if defined(TARGET_NR_statx) && defined(__NR_statx)
365 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
366           unsigned int, mask, struct target_statx *, statxbuf)
367 #endif
368 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
369 _syscall2(int, membarrier, int, cmd, int, flags)
370 #endif
371 
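/*
 * Translation table for open()/fcntl() file status flags. Each row is
 * { target_mask, target_bits, host_mask, host_bits }: when the flag
 * value masked with target_mask equals target_bits, host_bits is OR'ed
 * into the translated result (and symmetrically in the host-to-target
 * direction).
 */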
372 static const bitmask_transtbl fcntl_flags_tbl[] = {
373   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
374   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
375   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
376   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
377   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
378   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
379   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
380   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
381   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
382   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
383   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
384   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
385   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
386 #if defined(O_DIRECT)
387   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
388 #endif
389 #if defined(O_NOATIME)
390   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
391 #endif
392 #if defined(O_CLOEXEC)
393   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
394 #endif
395 #if defined(O_PATH)
396   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
397 #endif
398 #if defined(O_TMPFILE)
399   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
400 #endif
401   /* Don't terminate the list prematurely on 64-bit host+guest.  */
402 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
403   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
404 #endif
405   { 0, 0, 0, 0 }
406 };
407 
408 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
409 
410 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
411 #if defined(__NR_utimensat)
412 #define __NR_sys_utimensat __NR_utimensat
413 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
414           const struct timespec *,tsp,int,flags)
415 #else
416 static int sys_utimensat(int dirfd, const char *pathname,
417                          const struct timespec times[2], int flags)
418 {
419     errno = ENOSYS;
420     return -1;
421 }
422 #endif
423 #endif /* TARGET_NR_utimensat */
424 
425 #ifdef TARGET_NR_renameat2
426 #if defined(__NR_renameat2)
427 #define __NR_sys_renameat2 __NR_renameat2
428 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
429           const char *, new, unsigned int, flags)
430 #else
431 static int sys_renameat2(int oldfd, const char *old,
432                          int newfd, const char *new, int flags)
433 {
434     if (flags == 0) {
435         return renameat(oldfd, old, newfd, new);
436     }
437     errno = ENOSYS;
438     return -1;
439 }
440 #endif
441 #endif /* TARGET_NR_renameat2 */
442 
443 #ifdef CONFIG_INOTIFY
444 #include <sys/inotify.h>
445 
446 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
447 static int sys_inotify_init(void)
448 {
449   return (inotify_init());
450 }
451 #endif
452 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
453 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
454 {
455   return (inotify_add_watch(fd, pathname, mask));
456 }
457 #endif
458 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
459 static int sys_inotify_rm_watch(int fd, int32_t wd)
460 {
461   return (inotify_rm_watch(fd, wd));
462 }
463 #endif
464 #ifdef CONFIG_INOTIFY1
465 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
466 static int sys_inotify_init1(int flags)
467 {
468   return (inotify_init1(flags));
469 }
470 #endif
471 #endif
472 #else
473 /* Userspace can usually survive runtime without inotify */
474 #undef TARGET_NR_inotify_init
475 #undef TARGET_NR_inotify_init1
476 #undef TARGET_NR_inotify_add_watch
477 #undef TARGET_NR_inotify_rm_watch
478 #endif /* CONFIG_INOTIFY  */
479 
480 #if defined(TARGET_NR_prlimit64)
481 #ifndef __NR_prlimit64
482 # define __NR_prlimit64 -1
483 #endif
484 #define __NR_sys_prlimit64 __NR_prlimit64
485 /* The glibc rlimit structure may not be the one used by the underlying syscall */
486 struct host_rlimit64 {
487     uint64_t rlim_cur;
488     uint64_t rlim_max;
489 };
490 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
491           const struct host_rlimit64 *, new_limit,
492           struct host_rlimit64 *, old_limit)
493 #endif
494 
495 
496 #if defined(TARGET_NR_timer_create)
497 /* Maximum of 32 active POSIX timers allowed at any one time. */
498 static timer_t g_posix_timers[32] = { 0, };
499 
500 static inline int next_free_host_timer(void)
501 {
502     int k;
503     /* FIXME: Does finding the next free slot require a lock? */
504     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
505         if (g_posix_timers[k] == 0) {
506             g_posix_timers[k] = (timer_t) 1;
507             return k;
508         }
509     }
510     return -1;
511 }
512 #endif
513 
514 static inline int host_to_target_errno(int host_errno)
515 {
516     switch (host_errno) {
517 #define E(X)  case X: return TARGET_##X;
518 #include "errnos.c.inc"
519 #undef E
520     default:
521         return host_errno;
522     }
523 }
524 
525 static inline int target_to_host_errno(int target_errno)
526 {
527     switch (target_errno) {
528 #define E(X)  case TARGET_##X: return X;
529 #include "errnos.c.inc"
530 #undef E
531     default:
532         return target_errno;
533     }
534 }
535 
536 static inline abi_long get_errno(abi_long ret)
537 {
538     if (ret == -1)
539         return -host_to_target_errno(errno);
540     else
541         return ret;
542 }
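/*
 * Typical use: ret = get_errno(close(fd)); callers then check the result
 * with is_error() and hand the target errno straight back to the guest.
 */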
543 
544 const char *target_strerror(int err)
545 {
546     if (err == TARGET_ERESTARTSYS) {
547         return "To be restarted";
548     }
549     if (err == TARGET_QEMU_ESIGRETURN) {
550         return "Successful exit from sigreturn";
551     }
552 
553     return strerror(target_to_host_errno(err));
554 }
555 
556 #define safe_syscall0(type, name) \
557 static type safe_##name(void) \
558 { \
559     return safe_syscall(__NR_##name); \
560 }
561 
562 #define safe_syscall1(type, name, type1, arg1) \
563 static type safe_##name(type1 arg1) \
564 { \
565     return safe_syscall(__NR_##name, arg1); \
566 }
567 
568 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
569 static type safe_##name(type1 arg1, type2 arg2) \
570 { \
571     return safe_syscall(__NR_##name, arg1, arg2); \
572 }
573 
574 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
575 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
576 { \
577     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
578 }
579 
580 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
581     type4, arg4) \
582 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
583 { \
584     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
585 }
586 
587 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
588     type4, arg4, type5, arg5) \
589 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
590     type5 arg5) \
591 { \
592     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
593 }
594 
595 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
596     type4, arg4, type5, arg5, type6, arg6) \
597 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
598     type5 arg5, type6 arg6) \
599 { \
600     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
601 }
602 
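/*
 * The safe_* wrappers below route blocking host syscalls through
 * safe_syscall(), which cooperates with the signal-handling code: if a
 * guest signal arrives around the host syscall, the wrapper reports an
 * ERESTARTSYS-style failure so the caller can deliver the signal and
 * restart the syscall (the codepath exercised by DEBUG_ERESTARTSYS
 * above) instead of blocking with the signal pending.
 */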
603 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
604 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
605 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
606               int, flags, mode_t, mode)
607 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
608 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
609               struct rusage *, rusage)
610 #endif
611 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
612               int, options, struct rusage *, rusage)
613 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
614 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
615     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
616 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
617               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
618 #endif
619 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
620 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
621               struct timespec *, tsp, const sigset_t *, sigmask,
622               size_t, sigsetsize)
623 #endif
624 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
625               int, maxevents, int, timeout, const sigset_t *, sigmask,
626               size_t, sigsetsize)
627 #if defined(__NR_futex)
628 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
629               const struct timespec *,timeout,int *,uaddr2,int,val3)
630 #endif
631 #if defined(__NR_futex_time64)
632 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
633               const struct timespec *,timeout,int *,uaddr2,int,val3)
634 #endif
635 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
636 safe_syscall2(int, kill, pid_t, pid, int, sig)
637 safe_syscall2(int, tkill, int, tid, int, sig)
638 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
639 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
640 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
641 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
642               unsigned long, pos_l, unsigned long, pos_h)
643 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
644               unsigned long, pos_l, unsigned long, pos_h)
645 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
646               socklen_t, addrlen)
647 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
648               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
649 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
650               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
651 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
652 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
653 safe_syscall2(int, flock, int, fd, int, operation)
654 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
655 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
656               const struct timespec *, uts, size_t, sigsetsize)
657 #endif
658 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
659               int, flags)
660 #if defined(TARGET_NR_nanosleep)
661 safe_syscall2(int, nanosleep, const struct timespec *, req,
662               struct timespec *, rem)
663 #endif
664 #if defined(TARGET_NR_clock_nanosleep) || \
665     defined(TARGET_NR_clock_nanosleep_time64)
666 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
667               const struct timespec *, req, struct timespec *, rem)
668 #endif
669 #ifdef __NR_ipc
670 #ifdef __s390x__
671 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
672               void *, ptr)
673 #else
674 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
675               void *, ptr, long, fifth)
676 #endif
677 #endif
678 #ifdef __NR_msgsnd
679 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
680               int, flags)
681 #endif
682 #ifdef __NR_msgrcv
683 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
684               long, msgtype, int, flags)
685 #endif
686 #ifdef __NR_semtimedop
687 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
688               unsigned, nsops, const struct timespec *, timeout)
689 #endif
690 #if defined(TARGET_NR_mq_timedsend) || \
691     defined(TARGET_NR_mq_timedsend_time64)
692 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
693               size_t, len, unsigned, prio, const struct timespec *, timeout)
694 #endif
695 #if defined(TARGET_NR_mq_timedreceive) || \
696     defined(TARGET_NR_mq_timedreceive_time64)
697 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
698               size_t, len, unsigned *, prio, const struct timespec *, timeout)
699 #endif
700 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
701 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
702               int, outfd, loff_t *, poutoff, size_t, length,
703               unsigned int, flags)
704 #endif
705 
706 /* We do ioctl like this rather than via safe_syscall3 to preserve the
707  * "third argument might be integer or pointer or not present" behaviour of
708  * the libc function.
709  */
710 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
711 /* Similarly for fcntl. Note that callers must always:
712  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
713  *  use the flock64 struct rather than unsuffixed flock
714  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
715  */
716 #ifdef __NR_fcntl64
717 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
718 #else
719 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
720 #endif
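/*
 * So, for example, a lock request is issued as
 *     safe_fcntl(fd, F_SETLKW64, &fl64)
 * with a struct flock64, never as F_SETLKW with a plain struct flock.
 */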
721 
722 static inline int host_to_target_sock_type(int host_type)
723 {
724     int target_type;
725 
726     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
727     case SOCK_DGRAM:
728         target_type = TARGET_SOCK_DGRAM;
729         break;
730     case SOCK_STREAM:
731         target_type = TARGET_SOCK_STREAM;
732         break;
733     default:
734         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
735         break;
736     }
737 
738 #if defined(SOCK_CLOEXEC)
739     if (host_type & SOCK_CLOEXEC) {
740         target_type |= TARGET_SOCK_CLOEXEC;
741     }
742 #endif
743 
744 #if defined(SOCK_NONBLOCK)
745     if (host_type & SOCK_NONBLOCK) {
746         target_type |= TARGET_SOCK_NONBLOCK;
747     }
748 #endif
749 
750     return target_type;
751 }
752 
753 static abi_ulong target_brk;
754 static abi_ulong target_original_brk;
755 static abi_ulong brk_page;
756 
757 void target_set_brk(abi_ulong new_brk)
758 {
759     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
760     brk_page = HOST_PAGE_ALIGN(target_brk);
761 }
762 
763 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
764 #define DEBUGF_BRK(message, args...)
765 
766 /* do_brk() must return target values and target errnos. */
767 abi_long do_brk(abi_ulong new_brk)
768 {
769     abi_long mapped_addr;
770     abi_ulong new_alloc_size;
771 
772     /* brk pointers are always untagged */
773 
774     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
775 
776     if (!new_brk) {
777         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
778         return target_brk;
779     }
780     if (new_brk < target_original_brk) {
781         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
782                    target_brk);
783         return target_brk;
784     }
785 
786     /* If the new brk is less than the highest page reserved to the
787      * target heap allocation, set it and we're almost done...  */
788     if (new_brk <= brk_page) {
789         /* Heap contents are initialized to zero, as for anonymous
790          * mapped pages.  */
791         if (new_brk > target_brk) {
792             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
793         }
794         target_brk = new_brk;
795         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
796         return target_brk;
797     }
798 
799     /* We need to allocate more memory after the brk... Note that
800      * we don't use MAP_FIXED because that will map over the top of
801      * any existing mapping (like the one with the host libc or qemu
802      * itself); instead we treat "mapped but at wrong address" as
803      * a failure and unmap again.
804      */
805     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
806     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
807                                         PROT_READ|PROT_WRITE,
808                                         MAP_ANON|MAP_PRIVATE, 0, 0));
809 
810     if (mapped_addr == brk_page) {
811         /* Heap contents are initialized to zero, as for anonymous
812          * mapped pages.  Technically the new pages are already
813          * initialized to zero since they *are* anonymous mapped
814          * pages, however we have to take care with the contents that
815          * come from the remaining part of the previous page: it may
816      * contain garbage data due to a previous heap usage (grown
817          * then shrunken).  */
818         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
819 
820         target_brk = new_brk;
821         brk_page = HOST_PAGE_ALIGN(target_brk);
822         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
823             target_brk);
824         return target_brk;
825     } else if (mapped_addr != -1) {
826         /* Mapped but at wrong address, meaning there wasn't actually
827          * enough space for this brk.
828          */
829         target_munmap(mapped_addr, new_alloc_size);
830         mapped_addr = -1;
831         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
832     }
833     else {
834         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
835     }
836 
837 #if defined(TARGET_ALPHA)
838     /* We (partially) emulate OSF/1 on Alpha, which requires we
839        return a proper errno, not an unchanged brk value.  */
840     return -TARGET_ENOMEM;
841 #endif
842     /* For everything else, return the previous break. */
843     return target_brk;
844 }
845 
846 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
847     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
848 static inline abi_long copy_from_user_fdset(fd_set *fds,
849                                             abi_ulong target_fds_addr,
850                                             int n)
851 {
852     int i, nw, j, k;
853     abi_ulong b, *target_fds;
854 
855     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
856     if (!(target_fds = lock_user(VERIFY_READ,
857                                  target_fds_addr,
858                                  sizeof(abi_ulong) * nw,
859                                  1)))
860         return -TARGET_EFAULT;
861 
862     FD_ZERO(fds);
863     k = 0;
864     for (i = 0; i < nw; i++) {
865         /* grab the abi_ulong */
866         __get_user(b, &target_fds[i]);
867         for (j = 0; j < TARGET_ABI_BITS; j++) {
868             /* check the bit inside the abi_ulong */
869             if ((b >> j) & 1)
870                 FD_SET(k, fds);
871             k++;
872         }
873     }
874 
875     unlock_user(target_fds, target_fds_addr, 0);
876 
877     return 0;
878 }
879 
880 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
881                                                  abi_ulong target_fds_addr,
882                                                  int n)
883 {
884     if (target_fds_addr) {
885         if (copy_from_user_fdset(fds, target_fds_addr, n))
886             return -TARGET_EFAULT;
887         *fds_ptr = fds;
888     } else {
889         *fds_ptr = NULL;
890     }
891     return 0;
892 }
893 
894 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
895                                           const fd_set *fds,
896                                           int n)
897 {
898     int i, nw, j, k;
899     abi_long v;
900     abi_ulong *target_fds;
901 
902     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
903     if (!(target_fds = lock_user(VERIFY_WRITE,
904                                  target_fds_addr,
905                                  sizeof(abi_ulong) * nw,
906                                  0)))
907         return -TARGET_EFAULT;
908 
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         v = 0;
912         for (j = 0; j < TARGET_ABI_BITS; j++) {
913             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
914             k++;
915         }
916         __put_user(v, &target_fds[i]);
917     }
918 
919     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
920 
921     return 0;
922 }
923 #endif
924 
925 #if defined(__alpha__)
926 #define HOST_HZ 1024
927 #else
928 #define HOST_HZ 100
929 #endif
930 
931 static inline abi_long host_to_target_clock_t(long ticks)
932 {
933 #if HOST_HZ == TARGET_HZ
934     return ticks;
935 #else
936     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
937 #endif
938 }
939 
940 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
941                                              const struct rusage *rusage)
942 {
943     struct target_rusage *target_rusage;
944 
945     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
946         return -TARGET_EFAULT;
947     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
948     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
949     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
950     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
951     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
952     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
953     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
954     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
955     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
956     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
957     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
958     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
959     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
960     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
961     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
962     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
963     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
964     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
965     unlock_user_struct(target_rusage, target_addr, 1);
966 
967     return 0;
968 }
969 
970 #ifdef TARGET_NR_setrlimit
971 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
972 {
973     abi_ulong target_rlim_swap;
974     rlim_t result;
975 
976     target_rlim_swap = tswapal(target_rlim);
977     if (target_rlim_swap == TARGET_RLIM_INFINITY)
978         return RLIM_INFINITY;
979 
980     result = target_rlim_swap;
981     if (target_rlim_swap != (rlim_t)result)
982         return RLIM_INFINITY;
983 
984     return result;
985 }
986 #endif
987 
988 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
989 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
990 {
991     abi_ulong target_rlim_swap;
992     abi_ulong result;
993 
994     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
995         target_rlim_swap = TARGET_RLIM_INFINITY;
996     else
997         target_rlim_swap = rlim;
998     result = tswapal(target_rlim_swap);
999 
1000     return result;
1001 }
1002 #endif
1003 
1004 static inline int target_to_host_resource(int code)
1005 {
1006     switch (code) {
1007     case TARGET_RLIMIT_AS:
1008         return RLIMIT_AS;
1009     case TARGET_RLIMIT_CORE:
1010         return RLIMIT_CORE;
1011     case TARGET_RLIMIT_CPU:
1012         return RLIMIT_CPU;
1013     case TARGET_RLIMIT_DATA:
1014         return RLIMIT_DATA;
1015     case TARGET_RLIMIT_FSIZE:
1016         return RLIMIT_FSIZE;
1017     case TARGET_RLIMIT_LOCKS:
1018         return RLIMIT_LOCKS;
1019     case TARGET_RLIMIT_MEMLOCK:
1020         return RLIMIT_MEMLOCK;
1021     case TARGET_RLIMIT_MSGQUEUE:
1022         return RLIMIT_MSGQUEUE;
1023     case TARGET_RLIMIT_NICE:
1024         return RLIMIT_NICE;
1025     case TARGET_RLIMIT_NOFILE:
1026         return RLIMIT_NOFILE;
1027     case TARGET_RLIMIT_NPROC:
1028         return RLIMIT_NPROC;
1029     case TARGET_RLIMIT_RSS:
1030         return RLIMIT_RSS;
1031     case TARGET_RLIMIT_RTPRIO:
1032         return RLIMIT_RTPRIO;
1033     case TARGET_RLIMIT_SIGPENDING:
1034         return RLIMIT_SIGPENDING;
1035     case TARGET_RLIMIT_STACK:
1036         return RLIMIT_STACK;
1037     default:
1038         return code;
1039     }
1040 }
1041 
1042 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1043                                               abi_ulong target_tv_addr)
1044 {
1045     struct target_timeval *target_tv;
1046 
1047     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1048         return -TARGET_EFAULT;
1049     }
1050 
1051     __get_user(tv->tv_sec, &target_tv->tv_sec);
1052     __get_user(tv->tv_usec, &target_tv->tv_usec);
1053 
1054     unlock_user_struct(target_tv, target_tv_addr, 0);
1055 
1056     return 0;
1057 }
1058 
1059 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1060                                             const struct timeval *tv)
1061 {
1062     struct target_timeval *target_tv;
1063 
1064     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1065         return -TARGET_EFAULT;
1066     }
1067 
1068     __put_user(tv->tv_sec, &target_tv->tv_sec);
1069     __put_user(tv->tv_usec, &target_tv->tv_usec);
1070 
1071     unlock_user_struct(target_tv, target_tv_addr, 1);
1072 
1073     return 0;
1074 }
1075 
1076 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1077 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1078                                                 abi_ulong target_tv_addr)
1079 {
1080     struct target__kernel_sock_timeval *target_tv;
1081 
1082     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1083         return -TARGET_EFAULT;
1084     }
1085 
1086     __get_user(tv->tv_sec, &target_tv->tv_sec);
1087     __get_user(tv->tv_usec, &target_tv->tv_usec);
1088 
1089     unlock_user_struct(target_tv, target_tv_addr, 0);
1090 
1091     return 0;
1092 }
1093 #endif
1094 
1095 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1096                                               const struct timeval *tv)
1097 {
1098     struct target__kernel_sock_timeval *target_tv;
1099 
1100     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1101         return -TARGET_EFAULT;
1102     }
1103 
1104     __put_user(tv->tv_sec, &target_tv->tv_sec);
1105     __put_user(tv->tv_usec, &target_tv->tv_usec);
1106 
1107     unlock_user_struct(target_tv, target_tv_addr, 1);
1108 
1109     return 0;
1110 }
1111 
1112 #if defined(TARGET_NR_futex) || \
1113     defined(TARGET_NR_rt_sigtimedwait) || \
1114     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1115     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1116     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1117     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1118     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1119     defined(TARGET_NR_timer_settime) || \
1120     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1121 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1122                                                abi_ulong target_addr)
1123 {
1124     struct target_timespec *target_ts;
1125 
1126     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1127         return -TARGET_EFAULT;
1128     }
1129     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1130     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1131     unlock_user_struct(target_ts, target_addr, 0);
1132     return 0;
1133 }
1134 #endif
1135 
1136 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1137     defined(TARGET_NR_timer_settime64) || \
1138     defined(TARGET_NR_mq_timedsend_time64) || \
1139     defined(TARGET_NR_mq_timedreceive_time64) || \
1140     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1141     defined(TARGET_NR_clock_nanosleep_time64) || \
1142     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1143     defined(TARGET_NR_utimensat) || \
1144     defined(TARGET_NR_utimensat_time64) || \
1145     defined(TARGET_NR_semtimedop_time64) || \
1146     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1147 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1148                                                  abi_ulong target_addr)
1149 {
1150     struct target__kernel_timespec *target_ts;
1151 
1152     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1153         return -TARGET_EFAULT;
1154     }
1155     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1156     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1157     /* in 32bit mode, this drops the padding */
1158     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1159     unlock_user_struct(target_ts, target_addr, 0);
1160     return 0;
1161 }
1162 #endif
1163 
1164 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1165                                                struct timespec *host_ts)
1166 {
1167     struct target_timespec *target_ts;
1168 
1169     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1170         return -TARGET_EFAULT;
1171     }
1172     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1173     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1174     unlock_user_struct(target_ts, target_addr, 1);
1175     return 0;
1176 }
1177 
1178 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1179                                                  struct timespec *host_ts)
1180 {
1181     struct target__kernel_timespec *target_ts;
1182 
1183     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1184         return -TARGET_EFAULT;
1185     }
1186     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1187     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1188     unlock_user_struct(target_ts, target_addr, 1);
1189     return 0;
1190 }
1191 
1192 #if defined(TARGET_NR_gettimeofday)
1193 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1194                                              struct timezone *tz)
1195 {
1196     struct target_timezone *target_tz;
1197 
1198     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1199         return -TARGET_EFAULT;
1200     }
1201 
1202     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1203     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1204 
1205     unlock_user_struct(target_tz, target_tz_addr, 1);
1206 
1207     return 0;
1208 }
1209 #endif
1210 
1211 #if defined(TARGET_NR_settimeofday)
1212 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1213                                                abi_ulong target_tz_addr)
1214 {
1215     struct target_timezone *target_tz;
1216 
1217     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1218         return -TARGET_EFAULT;
1219     }
1220 
1221     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1222     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1223 
1224     unlock_user_struct(target_tz, target_tz_addr, 0);
1225 
1226     return 0;
1227 }
1228 #endif
1229 
1230 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1231 #include <mqueue.h>
1232 
1233 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1234                                               abi_ulong target_mq_attr_addr)
1235 {
1236     struct target_mq_attr *target_mq_attr;
1237 
1238     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1239                           target_mq_attr_addr, 1))
1240         return -TARGET_EFAULT;
1241 
1242     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1243     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1244     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1245     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1246 
1247     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1248 
1249     return 0;
1250 }
1251 
1252 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1253                                             const struct mq_attr *attr)
1254 {
1255     struct target_mq_attr *target_mq_attr;
1256 
1257     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1258                           target_mq_attr_addr, 0))
1259         return -TARGET_EFAULT;
1260 
1261     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1262     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1263     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1264     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1265 
1266     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1267 
1268     return 0;
1269 }
1270 #endif
1271 
1272 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1273 /* do_select() must return target values and target errnos. */
1274 static abi_long do_select(int n,
1275                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1276                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1277 {
1278     fd_set rfds, wfds, efds;
1279     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1280     struct timeval tv;
1281     struct timespec ts, *ts_ptr;
1282     abi_long ret;
1283 
1284     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1285     if (ret) {
1286         return ret;
1287     }
1288     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1289     if (ret) {
1290         return ret;
1291     }
1292     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1293     if (ret) {
1294         return ret;
1295     }
1296 
1297     if (target_tv_addr) {
1298         if (copy_from_user_timeval(&tv, target_tv_addr))
1299             return -TARGET_EFAULT;
1300         ts.tv_sec = tv.tv_sec;
1301         ts.tv_nsec = tv.tv_usec * 1000;
1302         ts_ptr = &ts;
1303     } else {
1304         ts_ptr = NULL;
1305     }
1306 
1307     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1308                                   ts_ptr, NULL));
1309 
1310     if (!is_error(ret)) {
1311         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1312             return -TARGET_EFAULT;
1313         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1314             return -TARGET_EFAULT;
1315         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1316             return -TARGET_EFAULT;
1317 
1318         if (target_tv_addr) {
1319             tv.tv_sec = ts.tv_sec;
1320             tv.tv_usec = ts.tv_nsec / 1000;
1321             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1322                 return -TARGET_EFAULT;
1323             }
1324         }
1325     }
1326 
1327     return ret;
1328 }
1329 
1330 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1331 static abi_long do_old_select(abi_ulong arg1)
1332 {
1333     struct target_sel_arg_struct *sel;
1334     abi_ulong inp, outp, exp, tvp;
1335     long nsel;
1336 
1337     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1338         return -TARGET_EFAULT;
1339     }
1340 
1341     nsel = tswapal(sel->n);
1342     inp = tswapal(sel->inp);
1343     outp = tswapal(sel->outp);
1344     exp = tswapal(sel->exp);
1345     tvp = tswapal(sel->tvp);
1346 
1347     unlock_user_struct(sel, arg1, 0);
1348 
1349     return do_select(nsel, inp, outp, exp, tvp);
1350 }
1351 #endif
1352 #endif
1353 
1354 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1355 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1356                             abi_long arg4, abi_long arg5, abi_long arg6,
1357                             bool time64)
1358 {
1359     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1360     fd_set rfds, wfds, efds;
1361     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1362     struct timespec ts, *ts_ptr;
1363     abi_long ret;
1364 
1365     /*
1366      * The 6th arg is actually two args smashed together,
1367      * so we cannot use the C library.
1368      */
1369     sigset_t set;
1370     struct {
1371         sigset_t *set;
1372         size_t size;
1373     } sig, *sig_ptr;
1374 
1375     abi_ulong arg_sigset, arg_sigsize, *arg7;
1376     target_sigset_t *target_sigset;
1377 
1378     n = arg1;
1379     rfd_addr = arg2;
1380     wfd_addr = arg3;
1381     efd_addr = arg4;
1382     ts_addr = arg5;
1383 
1384     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1385     if (ret) {
1386         return ret;
1387     }
1388     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1393     if (ret) {
1394         return ret;
1395     }
1396 
1397     /*
1398      * This takes a timespec, and not a timeval, so we cannot
1399      * use the do_select() helper ...
1400      */
1401     if (ts_addr) {
1402         if (time64) {
1403             if (target_to_host_timespec64(&ts, ts_addr)) {
1404                 return -TARGET_EFAULT;
1405             }
1406         } else {
1407             if (target_to_host_timespec(&ts, ts_addr)) {
1408                 return -TARGET_EFAULT;
1409             }
1410         }
1411         ts_ptr = &ts;
1412     } else {
1413         ts_ptr = NULL;
1414     }
1415 
1416     /* Extract the two packed args for the sigset */
1417     if (arg6) {
1418         sig_ptr = &sig;
1419         sig.size = SIGSET_T_SIZE;
1420 
1421         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1422         if (!arg7) {
1423             return -TARGET_EFAULT;
1424         }
1425         arg_sigset = tswapal(arg7[0]);
1426         arg_sigsize = tswapal(arg7[1]);
1427         unlock_user(arg7, arg6, 0);
1428 
1429         if (arg_sigset) {
1430             sig.set = &set;
1431             if (arg_sigsize != sizeof(*target_sigset)) {
1432                 /* Like the kernel, we enforce correct size sigsets */
1433                 return -TARGET_EINVAL;
1434             }
1435             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1436                                       sizeof(*target_sigset), 1);
1437             if (!target_sigset) {
1438                 return -TARGET_EFAULT;
1439             }
1440             target_to_host_sigset(&set, target_sigset);
1441             unlock_user(target_sigset, arg_sigset, 0);
1442         } else {
1443             sig.set = NULL;
1444         }
1445     } else {
1446         sig_ptr = NULL;
1447     }
1448 
1449     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1450                                   ts_ptr, sig_ptr));
1451 
1452     if (!is_error(ret)) {
1453         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1454             return -TARGET_EFAULT;
1455         }
1456         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1457             return -TARGET_EFAULT;
1458         }
1459         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1460             return -TARGET_EFAULT;
1461         }
1462         if (time64) {
1463             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1464                 return -TARGET_EFAULT;
1465             }
1466         } else {
1467             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1468                 return -TARGET_EFAULT;
1469             }
1470         }
1471     }
1472     return ret;
1473 }
1474 #endif
1475 
1476 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1477     defined(TARGET_NR_ppoll_time64)
1478 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1479                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1480 {
1481     struct target_pollfd *target_pfd;
1482     unsigned int nfds = arg2;
1483     struct pollfd *pfd;
1484     unsigned int i;
1485     abi_long ret;
1486 
1487     pfd = NULL;
1488     target_pfd = NULL;
1489     if (nfds) {
1490         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1491             return -TARGET_EINVAL;
1492         }
1493         target_pfd = lock_user(VERIFY_WRITE, arg1,
1494                                sizeof(struct target_pollfd) * nfds, 1);
1495         if (!target_pfd) {
1496             return -TARGET_EFAULT;
1497         }
1498 
1499         pfd = alloca(sizeof(struct pollfd) * nfds);
1500         for (i = 0; i < nfds; i++) {
1501             pfd[i].fd = tswap32(target_pfd[i].fd);
1502             pfd[i].events = tswap16(target_pfd[i].events);
1503         }
1504     }
1505     if (ppoll) {
1506         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1507         target_sigset_t *target_set;
1508         sigset_t _set, *set = &_set;
1509 
1510         if (arg3) {
1511             if (time64) {
1512                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1513                     unlock_user(target_pfd, arg1, 0);
1514                     return -TARGET_EFAULT;
1515                 }
1516             } else {
1517                 if (target_to_host_timespec(timeout_ts, arg3)) {
1518                     unlock_user(target_pfd, arg1, 0);
1519                     return -TARGET_EFAULT;
1520                 }
1521             }
1522         } else {
1523             timeout_ts = NULL;
1524         }
1525 
1526         if (arg4) {
1527             if (arg5 != sizeof(target_sigset_t)) {
1528                 unlock_user(target_pfd, arg1, 0);
1529                 return -TARGET_EINVAL;
1530             }
1531 
1532             target_set = lock_user(VERIFY_READ, arg4,
1533                                    sizeof(target_sigset_t), 1);
1534             if (!target_set) {
1535                 unlock_user(target_pfd, arg1, 0);
1536                 return -TARGET_EFAULT;
1537             }
1538             target_to_host_sigset(set, target_set);
1539         } else {
1540             set = NULL;
1541         }
1542 
1543         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1544                                    set, SIGSET_T_SIZE));
1545 
1546         if (!is_error(ret) && arg3) {
1547             if (time64) {
1548                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1549                     return -TARGET_EFAULT;
1550                 }
1551             } else {
1552                 if (host_to_target_timespec(arg3, timeout_ts)) {
1553                     return -TARGET_EFAULT;
1554                 }
1555             }
1556         }
1557         if (arg4) {
1558             unlock_user(target_set, arg4, 0);
1559         }
1560     } else {
1561         struct timespec ts, *pts;
1562 
1563         if (arg3 >= 0) {
1564             /* Convert ms to secs, ns */
1565             ts.tv_sec = arg3 / 1000;
1566             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1567             pts = &ts;
1568         } else {
1569             /* -ve poll() timeout means "infinite" */
1570             pts = NULL;
1571         }
1572         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1573     }
1574 
1575     if (!is_error(ret)) {
1576         for (i = 0; i < nfds; i++) {
1577             target_pfd[i].revents = tswap16(pfd[i].revents);
1578         }
1579     }
1580     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1581     return ret;
1582 }
1583 #endif
1584 
1585 static abi_long do_pipe2(int host_pipe[], int flags)
1586 {
1587 #ifdef CONFIG_PIPE2
1588     return pipe2(host_pipe, flags);
1589 #else
1590     return -ENOSYS;
1591 #endif
1592 }
1593 
1594 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1595                         int flags, int is_pipe2)
1596 {
1597     int host_pipe[2];
1598     abi_long ret;
1599     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1600 
1601     if (is_error(ret))
1602         return get_errno(ret);
1603 
1604     /* Several targets have special calling conventions for the original
1605        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1606     if (!is_pipe2) {
1607 #if defined(TARGET_ALPHA)
1608         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1609         return host_pipe[0];
1610 #elif defined(TARGET_MIPS)
1611         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1612         return host_pipe[0];
1613 #elif defined(TARGET_SH4)
1614         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1615         return host_pipe[0];
1616 #elif defined(TARGET_SPARC)
1617         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1618         return host_pipe[0];
1619 #endif
1620     }
1621 
1622     if (put_user_s32(host_pipe[0], pipedes)
1623         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1624         return -TARGET_EFAULT;
1625     return get_errno(ret);
1626 }
1627 
1628 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1629                                               abi_ulong target_addr,
1630                                               socklen_t len)
1631 {
1632     struct target_ip_mreqn *target_smreqn;
1633 
1634     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1635     if (!target_smreqn)
1636         return -TARGET_EFAULT;
1637     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1638     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1639     if (len == sizeof(struct target_ip_mreqn))
1640         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1641     unlock_user(target_smreqn, target_addr, 0);
1642 
1643     return 0;
1644 }
1645 
1646 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1647                                                abi_ulong target_addr,
1648                                                socklen_t len)
1649 {
1650     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1651     sa_family_t sa_family;
1652     struct target_sockaddr *target_saddr;
1653 
1654     if (fd_trans_target_to_host_addr(fd)) {
1655         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1656     }
1657 
1658     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1659     if (!target_saddr)
1660         return -TARGET_EFAULT;
1661 
1662     sa_family = tswap16(target_saddr->sa_family);
1663 
1664     /* Oops. The caller might send an incomplete sun_path; sun_path
1665      * must be terminated by \0 (see the manual page), but
1666      * unfortunately it is quite common to specify sockaddr_un
1667      * length as "strlen(x->sun_path)" while it should be
1668      * "strlen(...) + 1". We'll fix that here if needed.
1669      * The Linux kernel has a similar feature.
1670      */
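     /* For example, a guest binding to "/tmp/sock" may pass
      * len == offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock");
      * the byte just past that length is the terminating NUL, so the check
      * below extends len by one and the host kernel sees a properly
      * terminated path.
      */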
1671 
1672     if (sa_family == AF_UNIX) {
1673         if (len < unix_maxlen && len > 0) {
1674             char *cp = (char*)target_saddr;
1675 
1676             if ( cp[len-1] && !cp[len] )
1677                 len++;
1678         }
1679         if (len > unix_maxlen)
1680             len = unix_maxlen;
1681     }
1682 
1683     memcpy(addr, target_saddr, len);
1684     addr->sa_family = sa_family;
1685     if (sa_family == AF_NETLINK) {
1686         struct sockaddr_nl *nladdr;
1687 
1688         nladdr = (struct sockaddr_nl *)addr;
1689         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1690         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1691     } else if (sa_family == AF_PACKET) {
1692         struct target_sockaddr_ll *lladdr;
1693 
1694         lladdr = (struct target_sockaddr_ll *)addr;
1695         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1696         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1697     }
1698     unlock_user(target_saddr, target_addr, 0);
1699 
1700     return 0;
1701 }
1702 
1703 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1704                                                struct sockaddr *addr,
1705                                                socklen_t len)
1706 {
1707     struct target_sockaddr *target_saddr;
1708 
1709     if (len == 0) {
1710         return 0;
1711     }
1712     assert(addr);
1713 
1714     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1715     if (!target_saddr)
1716         return -TARGET_EFAULT;
1717     memcpy(target_saddr, addr, len);
1718     if (len >= offsetof(struct target_sockaddr, sa_family) +
1719         sizeof(target_saddr->sa_family)) {
1720         target_saddr->sa_family = tswap16(addr->sa_family);
1721     }
1722     if (addr->sa_family == AF_NETLINK &&
1723         len >= sizeof(struct target_sockaddr_nl)) {
1724         struct target_sockaddr_nl *target_nl =
1725                (struct target_sockaddr_nl *)target_saddr;
1726         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1727         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1728     } else if (addr->sa_family == AF_PACKET) {
1729         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1730         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1731         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1732     } else if (addr->sa_family == AF_INET6 &&
1733                len >= sizeof(struct target_sockaddr_in6)) {
1734         struct target_sockaddr_in6 *target_in6 =
1735                (struct target_sockaddr_in6 *)target_saddr;
1736         target_in6->sin6_scope_id = tswap32(target_in6->sin6_scope_id);
1737     }
1738     unlock_user(target_saddr, target_addr, len);
1739 
1740     return 0;
1741 }
1742 
1743 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1744                                            struct target_msghdr *target_msgh)
1745 {
1746     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1747     abi_long msg_controllen;
1748     abi_ulong target_cmsg_addr;
1749     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1750     socklen_t space = 0;
1751 
1752     msg_controllen = tswapal(target_msgh->msg_controllen);
1753     if (msg_controllen < sizeof (struct target_cmsghdr))
1754         goto the_end;
1755     target_cmsg_addr = tswapal(target_msgh->msg_control);
1756     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1757     target_cmsg_start = target_cmsg;
1758     if (!target_cmsg)
1759         return -TARGET_EFAULT;
1760 
1761     while (cmsg && target_cmsg) {
1762         void *data = CMSG_DATA(cmsg);
1763         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1764 
1765         int len = tswapal(target_cmsg->cmsg_len)
1766             - sizeof(struct target_cmsghdr);
1767 
1768         space += CMSG_SPACE(len);
1769         if (space > msgh->msg_controllen) {
1770             space -= CMSG_SPACE(len);
1771             /* This is a QEMU bug, since we allocated the payload
1772              * area ourselves (unlike overflow in host-to-target
1773              * conversion, which is just the guest giving us a buffer
1774              * that's too small). It can't happen for the payload types
1775              * we currently support; if it becomes an issue in future
1776              * we would need to improve our allocation strategy to
1777              * something more intelligent than "twice the size of the
1778              * target buffer we're reading from".
1779              */
1780             qemu_log_mask(LOG_UNIMP,
1781                           ("Unsupported ancillary data %d/%d: "
1782                            "unhandled msg size\n"),
1783                           tswap32(target_cmsg->cmsg_level),
1784                           tswap32(target_cmsg->cmsg_type));
1785             break;
1786         }
1787 
1788         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1789             cmsg->cmsg_level = SOL_SOCKET;
1790         } else {
1791             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1792         }
1793         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1794         cmsg->cmsg_len = CMSG_LEN(len);
1795 
1796         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1797             int *fd = (int *)data;
1798             int *target_fd = (int *)target_data;
1799             int i, numfds = len / sizeof(int);
1800 
1801             for (i = 0; i < numfds; i++) {
1802                 __get_user(fd[i], target_fd + i);
1803             }
1804         } else if (cmsg->cmsg_level == SOL_SOCKET
1805                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1806             struct ucred *cred = (struct ucred *)data;
1807             struct target_ucred *target_cred =
1808                 (struct target_ucred *)target_data;
1809 
1810             __get_user(cred->pid, &target_cred->pid);
1811             __get_user(cred->uid, &target_cred->uid);
1812             __get_user(cred->gid, &target_cred->gid);
1813         } else {
1814             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1815                           cmsg->cmsg_level, cmsg->cmsg_type);
1816             memcpy(data, target_data, len);
1817         }
1818 
1819         cmsg = CMSG_NXTHDR(msgh, cmsg);
1820         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1821                                          target_cmsg_start);
1822     }
1823     unlock_user(target_cmsg, target_cmsg_addr, 0);
1824  the_end:
1825     msgh->msg_controllen = space;
1826     return 0;
1827 }
1828 
1829 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1830                                            struct msghdr *msgh)
1831 {
1832     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1833     abi_long msg_controllen;
1834     abi_ulong target_cmsg_addr;
1835     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1836     socklen_t space = 0;
1837 
1838     msg_controllen = tswapal(target_msgh->msg_controllen);
1839     if (msg_controllen < sizeof (struct target_cmsghdr))
1840         goto the_end;
1841     target_cmsg_addr = tswapal(target_msgh->msg_control);
1842     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1843     target_cmsg_start = target_cmsg;
1844     if (!target_cmsg)
1845         return -TARGET_EFAULT;
1846 
1847     while (cmsg && target_cmsg) {
1848         void *data = CMSG_DATA(cmsg);
1849         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1850 
1851         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1852         int tgt_len, tgt_space;
1853 
1854         /* We never copy a half-header but may copy half-data;
1855          * this is Linux's behaviour in put_cmsg(). Note that
1856          * truncation here is a guest problem (which we report
1857          * to the guest via the CTRUNC bit), unlike truncation
1858          * in target_to_host_cmsg, which is a QEMU bug.
1859          */
1860         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1861             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1862             break;
1863         }
1864 
1865         if (cmsg->cmsg_level == SOL_SOCKET) {
1866             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1867         } else {
1868             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1869         }
1870         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1871 
1872         /* Payload types which need a different size of payload on
1873          * the target must adjust tgt_len here.
1874          */
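             /* For example, SO_TIMESTAMP carries a struct timeval, which is
              * 16 bytes on a 64-bit host but only 8 bytes as a
              * struct target_timeval on a typical 32-bit target, so the
              * switch below picks the target's size instead of len.
              */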
1875         tgt_len = len;
1876         switch (cmsg->cmsg_level) {
1877         case SOL_SOCKET:
1878             switch (cmsg->cmsg_type) {
1879             case SO_TIMESTAMP:
1880                 tgt_len = sizeof(struct target_timeval);
1881                 break;
1882             default:
1883                 break;
1884             }
1885             break;
1886         default:
1887             break;
1888         }
1889 
1890         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1891             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1892             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1893         }
1894 
1895         /* We must now copy-and-convert len bytes of payload
1896          * into tgt_len bytes of destination space. Bear in mind
1897          * that in both source and destination we may be dealing
1898          * with a truncated value!
1899          */
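             /* For instance, if the host delivered two SCM_RIGHTS descriptors
              * (8 bytes of payload) but the guest control buffer only has
              * room for one int, tgt_len was reduced above, only the first
              * descriptor is copied, and the guest sees MSG_CTRUNC.
              */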
1900         switch (cmsg->cmsg_level) {
1901         case SOL_SOCKET:
1902             switch (cmsg->cmsg_type) {
1903             case SCM_RIGHTS:
1904             {
1905                 int *fd = (int *)data;
1906                 int *target_fd = (int *)target_data;
1907                 int i, numfds = tgt_len / sizeof(int);
1908 
1909                 for (i = 0; i < numfds; i++) {
1910                     __put_user(fd[i], target_fd + i);
1911                 }
1912                 break;
1913             }
1914             case SO_TIMESTAMP:
1915             {
1916                 struct timeval *tv = (struct timeval *)data;
1917                 struct target_timeval *target_tv =
1918                     (struct target_timeval *)target_data;
1919 
1920                 if (len != sizeof(struct timeval) ||
1921                     tgt_len != sizeof(struct target_timeval)) {
1922                     goto unimplemented;
1923                 }
1924 
1925                 /* copy struct timeval to target */
1926                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1927                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1928                 break;
1929             }
1930             case SCM_CREDENTIALS:
1931             {
1932                 struct ucred *cred = (struct ucred *)data;
1933                 struct target_ucred *target_cred =
1934                     (struct target_ucred *)target_data;
1935 
1936                 __put_user(cred->pid, &target_cred->pid);
1937                 __put_user(cred->uid, &target_cred->uid);
1938                 __put_user(cred->gid, &target_cred->gid);
1939                 break;
1940             }
1941             default:
1942                 goto unimplemented;
1943             }
1944             break;
1945 
1946         case SOL_IP:
1947             switch (cmsg->cmsg_type) {
1948             case IP_TTL:
1949             {
1950                 uint32_t *v = (uint32_t *)data;
1951                 uint32_t *t_int = (uint32_t *)target_data;
1952 
1953                 if (len != sizeof(uint32_t) ||
1954                     tgt_len != sizeof(uint32_t)) {
1955                     goto unimplemented;
1956                 }
1957                 __put_user(*v, t_int);
1958                 break;
1959             }
1960             case IP_RECVERR:
1961             {
1962                 struct errhdr_t {
1963                    struct sock_extended_err ee;
1964                    struct sockaddr_in offender;
1965                 };
1966                 struct errhdr_t *errh = (struct errhdr_t *)data;
1967                 struct errhdr_t *target_errh =
1968                     (struct errhdr_t *)target_data;
1969 
1970                 if (len != sizeof(struct errhdr_t) ||
1971                     tgt_len != sizeof(struct errhdr_t)) {
1972                     goto unimplemented;
1973                 }
1974                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1975                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1976                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1977                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1978                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1979                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1980                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1981                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1982                     (void *) &errh->offender, sizeof(errh->offender));
1983                 break;
1984             }
1985             default:
1986                 goto unimplemented;
1987             }
1988             break;
1989 
1990         case SOL_IPV6:
1991             switch (cmsg->cmsg_type) {
1992             case IPV6_HOPLIMIT:
1993             {
1994                 uint32_t *v = (uint32_t *)data;
1995                 uint32_t *t_int = (uint32_t *)target_data;
1996 
1997                 if (len != sizeof(uint32_t) ||
1998                     tgt_len != sizeof(uint32_t)) {
1999                     goto unimplemented;
2000                 }
2001                 __put_user(*v, t_int);
2002                 break;
2003             }
2004             case IPV6_RECVERR:
2005             {
2006                 struct errhdr6_t {
2007                    struct sock_extended_err ee;
2008                    struct sockaddr_in6 offender;
2009                 };
2010                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2011                 struct errhdr6_t *target_errh =
2012                     (struct errhdr6_t *)target_data;
2013 
2014                 if (len != sizeof(struct errhdr6_t) ||
2015                     tgt_len != sizeof(struct errhdr6_t)) {
2016                     goto unimplemented;
2017                 }
2018                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2019                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2020                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2021                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2022                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2023                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2024                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2025                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2026                     (void *) &errh->offender, sizeof(errh->offender));
2027                 break;
2028             }
2029             default:
2030                 goto unimplemented;
2031             }
2032             break;
2033 
2034         default:
2035         unimplemented:
2036             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2037                           cmsg->cmsg_level, cmsg->cmsg_type);
2038             memcpy(target_data, data, MIN(len, tgt_len));
2039             if (tgt_len > len) {
2040                 memset(target_data + len, 0, tgt_len - len);
2041             }
2042         }
2043 
2044         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2045         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2046         if (msg_controllen < tgt_space) {
2047             tgt_space = msg_controllen;
2048         }
2049         msg_controllen -= tgt_space;
2050         space += tgt_space;
2051         cmsg = CMSG_NXTHDR(msgh, cmsg);
2052         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2053                                          target_cmsg_start);
2054     }
2055     unlock_user(target_cmsg, target_cmsg_addr, space);
2056  the_end:
2057     target_msgh->msg_controllen = tswapal(space);
2058     return 0;
2059 }
2060 
2061 /* do_setsockopt() Must return target values and target errnos. */
2062 static abi_long do_setsockopt(int sockfd, int level, int optname,
2063                               abi_ulong optval_addr, socklen_t optlen)
2064 {
2065     abi_long ret;
2066     int val;
2067     struct ip_mreqn *ip_mreq;
2068     struct ip_mreq_source *ip_mreq_source;
2069 
2070     switch(level) {
2071     case SOL_TCP:
2072     case SOL_UDP:
2073         /* TCP and UDP options all take an 'int' value.  */
2074         if (optlen < sizeof(uint32_t))
2075             return -TARGET_EINVAL;
2076 
2077         if (get_user_u32(val, optval_addr))
2078             return -TARGET_EFAULT;
2079         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2080         break;
2081     case SOL_IP:
2082         switch(optname) {
2083         case IP_TOS:
2084         case IP_TTL:
2085         case IP_HDRINCL:
2086         case IP_ROUTER_ALERT:
2087         case IP_RECVOPTS:
2088         case IP_RETOPTS:
2089         case IP_PKTINFO:
2090         case IP_MTU_DISCOVER:
2091         case IP_RECVERR:
2092         case IP_RECVTTL:
2093         case IP_RECVTOS:
2094 #ifdef IP_FREEBIND
2095         case IP_FREEBIND:
2096 #endif
2097         case IP_MULTICAST_TTL:
2098         case IP_MULTICAST_LOOP:
2099             val = 0;
2100             if (optlen >= sizeof(uint32_t)) {
2101                 if (get_user_u32(val, optval_addr))
2102                     return -TARGET_EFAULT;
2103             } else if (optlen >= 1) {
2104                 if (get_user_u8(val, optval_addr))
2105                     return -TARGET_EFAULT;
2106             }
2107             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2108             break;
2109         case IP_ADD_MEMBERSHIP:
2110         case IP_DROP_MEMBERSHIP:
2111             if (optlen < sizeof (struct target_ip_mreq) ||
2112                 optlen > sizeof (struct target_ip_mreqn))
2113                 return -TARGET_EINVAL;
2114 
2115             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2116             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2117             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2118             break;
2119 
2120         case IP_BLOCK_SOURCE:
2121         case IP_UNBLOCK_SOURCE:
2122         case IP_ADD_SOURCE_MEMBERSHIP:
2123         case IP_DROP_SOURCE_MEMBERSHIP:
2124             if (optlen != sizeof (struct target_ip_mreq_source))
2125                 return -TARGET_EINVAL;
2126 
2127             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                 if (!ip_mreq_source) {
                     return -TARGET_EFAULT;
                 }
2128             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2129             unlock_user(ip_mreq_source, optval_addr, 0);
2130             break;
2131 
2132         default:
2133             goto unimplemented;
2134         }
2135         break;
2136     case SOL_IPV6:
2137         switch (optname) {
2138         case IPV6_MTU_DISCOVER:
2139         case IPV6_MTU:
2140         case IPV6_V6ONLY:
2141         case IPV6_RECVPKTINFO:
2142         case IPV6_UNICAST_HOPS:
2143         case IPV6_MULTICAST_HOPS:
2144         case IPV6_MULTICAST_LOOP:
2145         case IPV6_RECVERR:
2146         case IPV6_RECVHOPLIMIT:
2147         case IPV6_2292HOPLIMIT:
2148         case IPV6_CHECKSUM:
2149         case IPV6_ADDRFORM:
2150         case IPV6_2292PKTINFO:
2151         case IPV6_RECVTCLASS:
2152         case IPV6_RECVRTHDR:
2153         case IPV6_2292RTHDR:
2154         case IPV6_RECVHOPOPTS:
2155         case IPV6_2292HOPOPTS:
2156         case IPV6_RECVDSTOPTS:
2157         case IPV6_2292DSTOPTS:
2158         case IPV6_TCLASS:
2159         case IPV6_ADDR_PREFERENCES:
2160 #ifdef IPV6_RECVPATHMTU
2161         case IPV6_RECVPATHMTU:
2162 #endif
2163 #ifdef IPV6_TRANSPARENT
2164         case IPV6_TRANSPARENT:
2165 #endif
2166 #ifdef IPV6_FREEBIND
2167         case IPV6_FREEBIND:
2168 #endif
2169 #ifdef IPV6_RECVORIGDSTADDR
2170         case IPV6_RECVORIGDSTADDR:
2171 #endif
2172             val = 0;
2173             if (optlen < sizeof(uint32_t)) {
2174                 return -TARGET_EINVAL;
2175             }
2176             if (get_user_u32(val, optval_addr)) {
2177                 return -TARGET_EFAULT;
2178             }
2179             ret = get_errno(setsockopt(sockfd, level, optname,
2180                                        &val, sizeof(val)));
2181             break;
2182         case IPV6_PKTINFO:
2183         {
2184             struct in6_pktinfo pki;
2185 
2186             if (optlen < sizeof(pki)) {
2187                 return -TARGET_EINVAL;
2188             }
2189 
2190             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2191                 return -TARGET_EFAULT;
2192             }
2193 
2194             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2195 
2196             ret = get_errno(setsockopt(sockfd, level, optname,
2197                                        &pki, sizeof(pki)));
2198             break;
2199         }
2200         case IPV6_ADD_MEMBERSHIP:
2201         case IPV6_DROP_MEMBERSHIP:
2202         {
2203             struct ipv6_mreq ipv6mreq;
2204 
2205             if (optlen < sizeof(ipv6mreq)) {
2206                 return -TARGET_EINVAL;
2207             }
2208 
2209             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2210                 return -TARGET_EFAULT;
2211             }
2212 
2213             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2214 
2215             ret = get_errno(setsockopt(sockfd, level, optname,
2216                                        &ipv6mreq, sizeof(ipv6mreq)));
2217             break;
2218         }
2219         default:
2220             goto unimplemented;
2221         }
2222         break;
2223     case SOL_ICMPV6:
2224         switch (optname) {
2225         case ICMPV6_FILTER:
2226         {
2227             struct icmp6_filter icmp6f;
2228 
2229             if (optlen > sizeof(icmp6f)) {
2230                 optlen = sizeof(icmp6f);
2231             }
2232 
2233             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2234                 return -TARGET_EFAULT;
2235             }
2236 
2237             for (val = 0; val < 8; val++) {
2238                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2239             }
2240 
2241             ret = get_errno(setsockopt(sockfd, level, optname,
2242                                        &icmp6f, optlen));
2243             break;
2244         }
2245         default:
2246             goto unimplemented;
2247         }
2248         break;
2249     case SOL_RAW:
2250         switch (optname) {
2251         case ICMP_FILTER:
2252         case IPV6_CHECKSUM:
2253             /* those take a u32 value */
2254             if (optlen < sizeof(uint32_t)) {
2255                 return -TARGET_EINVAL;
2256             }
2257 
2258             if (get_user_u32(val, optval_addr)) {
2259                 return -TARGET_EFAULT;
2260             }
2261             ret = get_errno(setsockopt(sockfd, level, optname,
2262                                        &val, sizeof(val)));
2263             break;
2264 
2265         default:
2266             goto unimplemented;
2267         }
2268         break;
2269 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2270     case SOL_ALG:
2271         switch (optname) {
2272         case ALG_SET_KEY:
2273         {
2274             char *alg_key = g_try_malloc(optlen);
2275 
2276             if (!alg_key) {
2277                 return -TARGET_ENOMEM;
2278             }
2279             if (copy_from_user(alg_key, optval_addr, optlen)) {
2280                 g_free(alg_key);
2281                 return -TARGET_EFAULT;
2282             }
2283             ret = get_errno(setsockopt(sockfd, level, optname,
2284                                        alg_key, optlen));
2285             g_free(alg_key);
2286             break;
2287         }
2288         case ALG_SET_AEAD_AUTHSIZE:
2289         {
2290             ret = get_errno(setsockopt(sockfd, level, optname,
2291                                        NULL, optlen));
2292             break;
2293         }
2294         default:
2295             goto unimplemented;
2296         }
2297         break;
2298 #endif
2299     case TARGET_SOL_SOCKET:
2300         switch (optname) {
2301         case TARGET_SO_RCVTIMEO:
2302         {
2303                 struct timeval tv;
2304 
2305                 optname = SO_RCVTIMEO;
2306 
2307 set_timeout:
2308                 if (optlen != sizeof(struct target_timeval)) {
2309                     return -TARGET_EINVAL;
2310                 }
2311 
2312                 if (copy_from_user_timeval(&tv, optval_addr)) {
2313                     return -TARGET_EFAULT;
2314                 }
2315 
2316                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2317                                 &tv, sizeof(tv)));
2318                 return ret;
2319         }
2320         case TARGET_SO_SNDTIMEO:
2321                 optname = SO_SNDTIMEO;
2322                 goto set_timeout;
2323         case TARGET_SO_ATTACH_FILTER:
2324         {
2325                 struct target_sock_fprog *tfprog;
2326                 struct target_sock_filter *tfilter;
2327                 struct sock_fprog fprog;
2328                 struct sock_filter *filter;
2329                 int i;
2330 
2331                 if (optlen != sizeof(*tfprog)) {
2332                     return -TARGET_EINVAL;
2333                 }
2334                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2335                     return -TARGET_EFAULT;
2336                 }
2337                 if (!lock_user_struct(VERIFY_READ, tfilter,
2338                                       tswapal(tfprog->filter), 0)) {
2339                     unlock_user_struct(tfprog, optval_addr, 1);
2340                     return -TARGET_EFAULT;
2341                 }
2342 
2343                 fprog.len = tswap16(tfprog->len);
2344                 filter = g_try_new(struct sock_filter, fprog.len);
2345                 if (filter == NULL) {
2346                     unlock_user_struct(tfilter, tfprog->filter, 1);
2347                     unlock_user_struct(tfprog, optval_addr, 1);
2348                     return -TARGET_ENOMEM;
2349                 }
2350                 for (i = 0; i < fprog.len; i++) {
2351                     filter[i].code = tswap16(tfilter[i].code);
2352                     filter[i].jt = tfilter[i].jt;
2353                     filter[i].jf = tfilter[i].jf;
2354                     filter[i].k = tswap32(tfilter[i].k);
2355                 }
2356                 fprog.filter = filter;
2357 
2358                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2359                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2360                 g_free(filter);
2361 
2362                 unlock_user_struct(tfilter, tfprog->filter, 1);
2363                 unlock_user_struct(tfprog, optval_addr, 1);
2364                 return ret;
2365         }
2366         case TARGET_SO_BINDTODEVICE:
2367         {
2368                 char *dev_ifname, *addr_ifname;
2369 
2370                 if (optlen > IFNAMSIZ - 1) {
2371                     optlen = IFNAMSIZ - 1;
2372                 }
2373                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2374                 if (!dev_ifname) {
2375                     return -TARGET_EFAULT;
2376                 }
2377                 optname = SO_BINDTODEVICE;
2378                 addr_ifname = alloca(IFNAMSIZ);
2379                 memcpy(addr_ifname, dev_ifname, optlen);
2380                 addr_ifname[optlen] = 0;
2381                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2382                                            addr_ifname, optlen));
2383                 unlock_user(dev_ifname, optval_addr, 0);
2384                 return ret;
2385         }
2386         case TARGET_SO_LINGER:
2387         {
2388                 struct linger lg;
2389                 struct target_linger *tlg;
2390 
2391                 if (optlen != sizeof(struct target_linger)) {
2392                     return -TARGET_EINVAL;
2393                 }
2394                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2395                     return -TARGET_EFAULT;
2396                 }
2397                 __get_user(lg.l_onoff, &tlg->l_onoff);
2398                 __get_user(lg.l_linger, &tlg->l_linger);
2399                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2400                                 &lg, sizeof(lg)));
2401                 unlock_user_struct(tlg, optval_addr, 0);
2402                 return ret;
2403         }
2404             /* Options with 'int' argument.  */
2405         case TARGET_SO_DEBUG:
2406 		optname = SO_DEBUG;
2407 		break;
2408         case TARGET_SO_REUSEADDR:
2409 		optname = SO_REUSEADDR;
2410 		break;
2411 #ifdef SO_REUSEPORT
2412         case TARGET_SO_REUSEPORT:
2413                 optname = SO_REUSEPORT;
2414                 break;
2415 #endif
2416         case TARGET_SO_TYPE:
2417 		optname = SO_TYPE;
2418 		break;
2419         case TARGET_SO_ERROR:
2420 		optname = SO_ERROR;
2421 		break;
2422         case TARGET_SO_DONTROUTE:
2423 		optname = SO_DONTROUTE;
2424 		break;
2425         case TARGET_SO_BROADCAST:
2426 		optname = SO_BROADCAST;
2427 		break;
2428         case TARGET_SO_SNDBUF:
2429 		optname = SO_SNDBUF;
2430 		break;
2431         case TARGET_SO_SNDBUFFORCE:
2432                 optname = SO_SNDBUFFORCE;
2433                 break;
2434         case TARGET_SO_RCVBUF:
2435 		optname = SO_RCVBUF;
2436 		break;
2437         case TARGET_SO_RCVBUFFORCE:
2438                 optname = SO_RCVBUFFORCE;
2439                 break;
2440         case TARGET_SO_KEEPALIVE:
2441 		optname = SO_KEEPALIVE;
2442 		break;
2443         case TARGET_SO_OOBINLINE:
2444 		optname = SO_OOBINLINE;
2445 		break;
2446         case TARGET_SO_NO_CHECK:
2447 		optname = SO_NO_CHECK;
2448 		break;
2449         case TARGET_SO_PRIORITY:
2450 		optname = SO_PRIORITY;
2451 		break;
2452 #ifdef SO_BSDCOMPAT
2453         case TARGET_SO_BSDCOMPAT:
2454 		optname = SO_BSDCOMPAT;
2455 		break;
2456 #endif
2457         case TARGET_SO_PASSCRED:
2458 		optname = SO_PASSCRED;
2459 		break;
2460         case TARGET_SO_PASSSEC:
2461                 optname = SO_PASSSEC;
2462                 break;
2463         case TARGET_SO_TIMESTAMP:
2464 		optname = SO_TIMESTAMP;
2465 		break;
2466         case TARGET_SO_RCVLOWAT:
2467 		optname = SO_RCVLOWAT;
2468 		break;
2469         default:
2470             goto unimplemented;
2471         }
2472 	if (optlen < sizeof(uint32_t))
2473             return -TARGET_EINVAL;
2474 
2475 	if (get_user_u32(val, optval_addr))
2476             return -TARGET_EFAULT;
2477 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2478         break;
2479 #ifdef SOL_NETLINK
2480     case SOL_NETLINK:
2481         switch (optname) {
2482         case NETLINK_PKTINFO:
2483         case NETLINK_ADD_MEMBERSHIP:
2484         case NETLINK_DROP_MEMBERSHIP:
2485         case NETLINK_BROADCAST_ERROR:
2486         case NETLINK_NO_ENOBUFS:
2487 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2488         case NETLINK_LISTEN_ALL_NSID:
2489         case NETLINK_CAP_ACK:
2490 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2491 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2492         case NETLINK_EXT_ACK:
2493 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2495         case NETLINK_GET_STRICT_CHK:
2496 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2497             break;
2498         default:
2499             goto unimplemented;
2500         }
2501         val = 0;
2502         if (optlen < sizeof(uint32_t)) {
2503             return -TARGET_EINVAL;
2504         }
2505         if (get_user_u32(val, optval_addr)) {
2506             return -TARGET_EFAULT;
2507         }
2508         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2509                                    sizeof(val)));
2510         break;
2511 #endif /* SOL_NETLINK */
2512     default:
2513     unimplemented:
2514         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2515                       level, optname);
2516         ret = -TARGET_ENOPROTOOPT;
2517     }
2518     return ret;
2519 }
2520 
2521 /* do_getsockopt() Must return target values and target errnos. */
2522 static abi_long do_getsockopt(int sockfd, int level, int optname,
2523                               abi_ulong optval_addr, abi_ulong optlen)
2524 {
2525     abi_long ret;
2526     int len, val;
2527     socklen_t lv;
2528 
2529     switch(level) {
2530     case TARGET_SOL_SOCKET:
2531         level = SOL_SOCKET;
2532         switch (optname) {
2533         /* These don't just return a single integer */
2534         case TARGET_SO_PEERNAME:
2535             goto unimplemented;
2536         case TARGET_SO_RCVTIMEO: {
2537             struct timeval tv;
2538             socklen_t tvlen;
2539 
2540             optname = SO_RCVTIMEO;
2541 
2542 get_timeout:
2543             if (get_user_u32(len, optlen)) {
2544                 return -TARGET_EFAULT;
2545             }
2546             if (len < 0) {
2547                 return -TARGET_EINVAL;
2548             }
2549 
2550             tvlen = sizeof(tv);
2551             ret = get_errno(getsockopt(sockfd, level, optname,
2552                                        &tv, &tvlen));
2553             if (ret < 0) {
2554                 return ret;
2555             }
2556             if (len > sizeof(struct target_timeval)) {
2557                 len = sizeof(struct target_timeval);
2558             }
2559             if (copy_to_user_timeval(optval_addr, &tv)) {
2560                 return -TARGET_EFAULT;
2561             }
2562             if (put_user_u32(len, optlen)) {
2563                 return -TARGET_EFAULT;
2564             }
2565             break;
2566         }
2567         case TARGET_SO_SNDTIMEO:
2568             optname = SO_SNDTIMEO;
2569             goto get_timeout;
2570         case TARGET_SO_PEERCRED: {
2571             struct ucred cr;
2572             socklen_t crlen;
2573             struct target_ucred *tcr;
2574 
2575             if (get_user_u32(len, optlen)) {
2576                 return -TARGET_EFAULT;
2577             }
2578             if (len < 0) {
2579                 return -TARGET_EINVAL;
2580             }
2581 
2582             crlen = sizeof(cr);
2583             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2584                                        &cr, &crlen));
2585             if (ret < 0) {
2586                 return ret;
2587             }
2588             if (len > crlen) {
2589                 len = crlen;
2590             }
2591             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2592                 return -TARGET_EFAULT;
2593             }
2594             __put_user(cr.pid, &tcr->pid);
2595             __put_user(cr.uid, &tcr->uid);
2596             __put_user(cr.gid, &tcr->gid);
2597             unlock_user_struct(tcr, optval_addr, 1);
2598             if (put_user_u32(len, optlen)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             break;
2602         }
2603         case TARGET_SO_PEERSEC: {
2604             char *name;
2605 
2606             if (get_user_u32(len, optlen)) {
2607                 return -TARGET_EFAULT;
2608             }
2609             if (len < 0) {
2610                 return -TARGET_EINVAL;
2611             }
2612             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2613             if (!name) {
2614                 return -TARGET_EFAULT;
2615             }
2616             lv = len;
2617             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2618                                        name, &lv));
2619             if (put_user_u32(lv, optlen)) {
2620                 ret = -TARGET_EFAULT;
2621             }
2622             unlock_user(name, optval_addr, lv);
2623             break;
2624         }
2625         case TARGET_SO_LINGER:
2626         {
2627             struct linger lg;
2628             socklen_t lglen;
2629             struct target_linger *tlg;
2630 
2631             if (get_user_u32(len, optlen)) {
2632                 return -TARGET_EFAULT;
2633             }
2634             if (len < 0) {
2635                 return -TARGET_EINVAL;
2636             }
2637 
2638             lglen = sizeof(lg);
2639             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2640                                        &lg, &lglen));
2641             if (ret < 0) {
2642                 return ret;
2643             }
2644             if (len > lglen) {
2645                 len = lglen;
2646             }
2647             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             __put_user(lg.l_onoff, &tlg->l_onoff);
2651             __put_user(lg.l_linger, &tlg->l_linger);
2652             unlock_user_struct(tlg, optval_addr, 1);
2653             if (put_user_u32(len, optlen)) {
2654                 return -TARGET_EFAULT;
2655             }
2656             break;
2657         }
2658         /* Options with 'int' argument.  */
2659         case TARGET_SO_DEBUG:
2660             optname = SO_DEBUG;
2661             goto int_case;
2662         case TARGET_SO_REUSEADDR:
2663             optname = SO_REUSEADDR;
2664             goto int_case;
2665 #ifdef SO_REUSEPORT
2666         case TARGET_SO_REUSEPORT:
2667             optname = SO_REUSEPORT;
2668             goto int_case;
2669 #endif
2670         case TARGET_SO_TYPE:
2671             optname = SO_TYPE;
2672             goto int_case;
2673         case TARGET_SO_ERROR:
2674             optname = SO_ERROR;
2675             goto int_case;
2676         case TARGET_SO_DONTROUTE:
2677             optname = SO_DONTROUTE;
2678             goto int_case;
2679         case TARGET_SO_BROADCAST:
2680             optname = SO_BROADCAST;
2681             goto int_case;
2682         case TARGET_SO_SNDBUF:
2683             optname = SO_SNDBUF;
2684             goto int_case;
2685         case TARGET_SO_RCVBUF:
2686             optname = SO_RCVBUF;
2687             goto int_case;
2688         case TARGET_SO_KEEPALIVE:
2689             optname = SO_KEEPALIVE;
2690             goto int_case;
2691         case TARGET_SO_OOBINLINE:
2692             optname = SO_OOBINLINE;
2693             goto int_case;
2694         case TARGET_SO_NO_CHECK:
2695             optname = SO_NO_CHECK;
2696             goto int_case;
2697         case TARGET_SO_PRIORITY:
2698             optname = SO_PRIORITY;
2699             goto int_case;
2700 #ifdef SO_BSDCOMPAT
2701         case TARGET_SO_BSDCOMPAT:
2702             optname = SO_BSDCOMPAT;
2703             goto int_case;
2704 #endif
2705         case TARGET_SO_PASSCRED:
2706             optname = SO_PASSCRED;
2707             goto int_case;
2708         case TARGET_SO_TIMESTAMP:
2709             optname = SO_TIMESTAMP;
2710             goto int_case;
2711         case TARGET_SO_RCVLOWAT:
2712             optname = SO_RCVLOWAT;
2713             goto int_case;
2714         case TARGET_SO_ACCEPTCONN:
2715             optname = SO_ACCEPTCONN;
2716             goto int_case;
2717         case TARGET_SO_PROTOCOL:
2718             optname = SO_PROTOCOL;
2719             goto int_case;
2720         case TARGET_SO_DOMAIN:
2721             optname = SO_DOMAIN;
2722             goto int_case;
2723         default:
2724             goto int_case;
2725         }
2726         break;
2727     case SOL_TCP:
2728     case SOL_UDP:
2729         /* TCP and UDP options all take an 'int' value.  */
2730     int_case:
2731         if (get_user_u32(len, optlen))
2732             return -TARGET_EFAULT;
2733         if (len < 0)
2734             return -TARGET_EINVAL;
2735         lv = sizeof(lv);
2736         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2737         if (ret < 0)
2738             return ret;
2739         if (optname == SO_TYPE) {
2740             val = host_to_target_sock_type(val);
2741         }
2742         if (len > lv)
2743             len = lv;
2744         if (len == 4) {
2745             if (put_user_u32(val, optval_addr))
2746                 return -TARGET_EFAULT;
2747         } else {
2748             if (put_user_u8(val, optval_addr))
2749                 return -TARGET_EFAULT;
2750         }
2751         if (put_user_u32(len, optlen))
2752             return -TARGET_EFAULT;
2753         break;
2754     case SOL_IP:
2755         switch(optname) {
2756         case IP_TOS:
2757         case IP_TTL:
2758         case IP_HDRINCL:
2759         case IP_ROUTER_ALERT:
2760         case IP_RECVOPTS:
2761         case IP_RETOPTS:
2762         case IP_PKTINFO:
2763         case IP_MTU_DISCOVER:
2764         case IP_RECVERR:
2765         case IP_RECVTOS:
2766 #ifdef IP_FREEBIND
2767         case IP_FREEBIND:
2768 #endif
2769         case IP_MULTICAST_TTL:
2770         case IP_MULTICAST_LOOP:
2771             if (get_user_u32(len, optlen))
2772                 return -TARGET_EFAULT;
2773             if (len < 0)
2774                 return -TARGET_EINVAL;
2775             lv = sizeof(lv);
2776             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2777             if (ret < 0)
2778                 return ret;
2779             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2780                 len = 1;
2781                 if (put_user_u32(len, optlen)
2782                     || put_user_u8(val, optval_addr))
2783                     return -TARGET_EFAULT;
2784             } else {
2785                 if (len > sizeof(int))
2786                     len = sizeof(int);
2787                 if (put_user_u32(len, optlen)
2788                     || put_user_u32(val, optval_addr))
2789                     return -TARGET_EFAULT;
2790             }
2791             break;
2792         default:
2793             ret = -TARGET_ENOPROTOOPT;
2794             break;
2795         }
2796         break;
2797     case SOL_IPV6:
2798         switch (optname) {
2799         case IPV6_MTU_DISCOVER:
2800         case IPV6_MTU:
2801         case IPV6_V6ONLY:
2802         case IPV6_RECVPKTINFO:
2803         case IPV6_UNICAST_HOPS:
2804         case IPV6_MULTICAST_HOPS:
2805         case IPV6_MULTICAST_LOOP:
2806         case IPV6_RECVERR:
2807         case IPV6_RECVHOPLIMIT:
2808         case IPV6_2292HOPLIMIT:
2809         case IPV6_CHECKSUM:
2810         case IPV6_ADDRFORM:
2811         case IPV6_2292PKTINFO:
2812         case IPV6_RECVTCLASS:
2813         case IPV6_RECVRTHDR:
2814         case IPV6_2292RTHDR:
2815         case IPV6_RECVHOPOPTS:
2816         case IPV6_2292HOPOPTS:
2817         case IPV6_RECVDSTOPTS:
2818         case IPV6_2292DSTOPTS:
2819         case IPV6_TCLASS:
2820         case IPV6_ADDR_PREFERENCES:
2821 #ifdef IPV6_RECVPATHMTU
2822         case IPV6_RECVPATHMTU:
2823 #endif
2824 #ifdef IPV6_TRANSPARENT
2825         case IPV6_TRANSPARENT:
2826 #endif
2827 #ifdef IPV6_FREEBIND
2828         case IPV6_FREEBIND:
2829 #endif
2830 #ifdef IPV6_RECVORIGDSTADDR
2831         case IPV6_RECVORIGDSTADDR:
2832 #endif
2833             if (get_user_u32(len, optlen))
2834                 return -TARGET_EFAULT;
2835             if (len < 0)
2836                 return -TARGET_EINVAL;
2837             lv = sizeof(lv);
2838             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2839             if (ret < 0)
2840                 return ret;
2841             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2842                 len = 1;
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u8(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             } else {
2847                 if (len > sizeof(int))
2848                     len = sizeof(int);
2849                 if (put_user_u32(len, optlen)
2850                     || put_user_u32(val, optval_addr))
2851                     return -TARGET_EFAULT;
2852             }
2853             break;
2854         default:
2855             ret = -TARGET_ENOPROTOOPT;
2856             break;
2857         }
2858         break;
2859 #ifdef SOL_NETLINK
2860     case SOL_NETLINK:
2861         switch (optname) {
2862         case NETLINK_PKTINFO:
2863         case NETLINK_BROADCAST_ERROR:
2864         case NETLINK_NO_ENOBUFS:
2865 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2866         case NETLINK_LISTEN_ALL_NSID:
2867         case NETLINK_CAP_ACK:
2868 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2869 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2870         case NETLINK_EXT_ACK:
2871 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2873         case NETLINK_GET_STRICT_CHK:
2874 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2875             if (get_user_u32(len, optlen)) {
2876                 return -TARGET_EFAULT;
2877             }
2878             if (len != sizeof(val)) {
2879                 return -TARGET_EINVAL;
2880             }
2881             lv = len;
2882             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2883             if (ret < 0) {
2884                 return ret;
2885             }
2886             if (put_user_u32(lv, optlen)
2887                 || put_user_u32(val, optval_addr)) {
2888                 return -TARGET_EFAULT;
2889             }
2890             break;
2891 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2892         case NETLINK_LIST_MEMBERSHIPS:
2893         {
2894             uint32_t *results;
2895             int i;
2896             if (get_user_u32(len, optlen)) {
2897                 return -TARGET_EFAULT;
2898             }
2899             if (len < 0) {
2900                 return -TARGET_EINVAL;
2901             }
2902             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2903             if (!results && len > 0) {
2904                 return -TARGET_EFAULT;
2905             }
2906             lv = len;
2907             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2908             if (ret < 0) {
2909                 unlock_user(results, optval_addr, 0);
2910                 return ret;
2911             }
2912             /* swap host endianness to target endianness. */
2913             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2914                 results[i] = tswap32(results[i]);
2915             }
2916             if (put_user_u32(lv, optlen)) {
2917                 return -TARGET_EFAULT;
2918             }
2919             unlock_user(results, optval_addr, 0);
2920             break;
2921         }
2922 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2923         default:
2924             goto unimplemented;
2925         }
2926         break;
2927 #endif /* SOL_NETLINK */
2928     default:
2929     unimplemented:
2930         qemu_log_mask(LOG_UNIMP,
2931                       "getsockopt level=%d optname=%d not yet supported\n",
2932                       level, optname);
2933         ret = -TARGET_EOPNOTSUPP;
2934         break;
2935     }
2936     return ret;
2937 }
2938 
2939 /* Convert target low/high pair representing file offset into the host
2940  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2941  * as the kernel doesn't handle them either.
2942  */
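     /* Illustrative example, assuming a 32-bit target and a 64-bit host:
      * tlow = 0x00001000 and thigh = 0x2 combine into off = 0x200001000;
      * *hlow then receives the whole 64-bit value and *hhigh becomes 0.
      */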
2943 static void target_to_host_low_high(abi_ulong tlow,
2944                                     abi_ulong thigh,
2945                                     unsigned long *hlow,
2946                                     unsigned long *hhigh)
2947 {
2948     uint64_t off = tlow |
2949         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2950         TARGET_LONG_BITS / 2;
2951 
2952     *hlow = off;
2953     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2954 }
2955 
2956 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2957                                 abi_ulong count, int copy)
2958 {
2959     struct target_iovec *target_vec;
2960     struct iovec *vec;
2961     abi_ulong total_len, max_len;
2962     int i;
2963     int err = 0;
2964     bool bad_address = false;
2965 
2966     if (count == 0) {
2967         errno = 0;
2968         return NULL;
2969     }
2970     if (count > IOV_MAX) {
2971         errno = EINVAL;
2972         return NULL;
2973     }
2974 
2975     vec = g_try_new0(struct iovec, count);
2976     if (vec == NULL) {
2977         errno = ENOMEM;
2978         return NULL;
2979     }
2980 
2981     target_vec = lock_user(VERIFY_READ, target_addr,
2982                            count * sizeof(struct target_iovec), 1);
2983     if (target_vec == NULL) {
2984         err = EFAULT;
2985         goto fail2;
2986     }
2987 
2988     /* ??? If host page size > target page size, this will result in a
2989        value larger than what we can actually support.  */
2990     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2991     total_len = 0;
2992 
2993     for (i = 0; i < count; i++) {
2994         abi_ulong base = tswapal(target_vec[i].iov_base);
2995         abi_long len = tswapal(target_vec[i].iov_len);
2996 
2997         if (len < 0) {
2998             err = EINVAL;
2999             goto fail;
3000         } else if (len == 0) {
3001             /* Zero length pointer is ignored.  */
3002             vec[i].iov_base = 0;
3003         } else {
3004             vec[i].iov_base = lock_user(type, base, len, copy);
3005             /* If the first buffer pointer is bad, this is a fault.  But
3006              * subsequent bad buffers will result in a partial write; this
3007              * is realized by filling the vector with null pointers and
3008              * zero lengths. */
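                 /* For example, in a writev() with three buffers where the
                  * second points at unmapped guest memory, the first buffer
                  * is locked normally while the second and every later one
                  * are given zero length, so the guest sees a short transfer
                  * rather than EFAULT.
                  */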
3009             if (!vec[i].iov_base) {
3010                 if (i == 0) {
3011                     err = EFAULT;
3012                     goto fail;
3013                 } else {
3014                     bad_address = true;
3015                 }
3016             }
3017             if (bad_address) {
3018                 len = 0;
3019             }
3020             if (len > max_len - total_len) {
3021                 len = max_len - total_len;
3022             }
3023         }
3024         vec[i].iov_len = len;
3025         total_len += len;
3026     }
3027 
3028     unlock_user(target_vec, target_addr, 0);
3029     return vec;
3030 
3031  fail:
3032     while (--i >= 0) {
3033         if (tswapal(target_vec[i].iov_len) > 0) {
3034             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3035         }
3036     }
3037     unlock_user(target_vec, target_addr, 0);
3038  fail2:
3039     g_free(vec);
3040     errno = err;
3041     return NULL;
3042 }
3043 
3044 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3045                          abi_ulong count, int copy)
3046 {
3047     struct target_iovec *target_vec;
3048     int i;
3049 
3050     target_vec = lock_user(VERIFY_READ, target_addr,
3051                            count * sizeof(struct target_iovec), 1);
3052     if (target_vec) {
3053         for (i = 0; i < count; i++) {
3054             abi_ulong base = tswapal(target_vec[i].iov_base);
3055             abi_long len = tswapal(target_vec[i].iov_len);
3056             if (len < 0) {
3057                 break;
3058             }
3059             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3060         }
3061         unlock_user(target_vec, target_addr, 0);
3062     }
3063 
3064     g_free(vec);
3065 }
3066 
3067 static inline int target_to_host_sock_type(int *type)
3068 {
3069     int host_type = 0;
3070     int target_type = *type;
3071 
3072     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3073     case TARGET_SOCK_DGRAM:
3074         host_type = SOCK_DGRAM;
3075         break;
3076     case TARGET_SOCK_STREAM:
3077         host_type = SOCK_STREAM;
3078         break;
3079     default:
3080         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3081         break;
3082     }
3083     if (target_type & TARGET_SOCK_CLOEXEC) {
3084 #if defined(SOCK_CLOEXEC)
3085         host_type |= SOCK_CLOEXEC;
3086 #else
3087         return -TARGET_EINVAL;
3088 #endif
3089     }
3090     if (target_type & TARGET_SOCK_NONBLOCK) {
3091 #if defined(SOCK_NONBLOCK)
3092         host_type |= SOCK_NONBLOCK;
3093 #elif !defined(O_NONBLOCK)
3094         return -TARGET_EINVAL;
3095 #endif
3096     }
3097     *type = host_type;
3098     return 0;
3099 }
3100 
3101 /* Try to emulate socket type flags after socket creation.  */
3102 static int sock_flags_fixup(int fd, int target_type)
3103 {
3104 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3105     if (target_type & TARGET_SOCK_NONBLOCK) {
3106         int flags = fcntl(fd, F_GETFL);
3107         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3108             close(fd);
3109             return -TARGET_EINVAL;
3110         }
3111     }
3112 #endif
3113     return fd;
3114 }
3115 
3116 /* do_socket() Must return target values and target errnos. */
3117 static abi_long do_socket(int domain, int type, int protocol)
3118 {
3119     int target_type = type;
3120     int ret;
3121 
3122     ret = target_to_host_sock_type(&type);
3123     if (ret) {
3124         return ret;
3125     }
3126 
3127     if (domain == PF_NETLINK && !(
3128 #ifdef CONFIG_RTNETLINK
3129          protocol == NETLINK_ROUTE ||
3130 #endif
3131          protocol == NETLINK_KOBJECT_UEVENT ||
3132          protocol == NETLINK_AUDIT)) {
3133         return -TARGET_EPROTONOSUPPORT;
3134     }
3135 
3136     if (domain == AF_PACKET ||
3137         (domain == AF_INET && type == SOCK_PACKET)) {
3138         protocol = tswap16(protocol);
3139     }
3140 
3141     ret = get_errno(socket(domain, type, protocol));
3142     if (ret >= 0) {
3143         ret = sock_flags_fixup(ret, target_type);
3144         if (type == SOCK_PACKET) {
3145             /* Handle an obsolete case:
3146              * if the socket type is SOCK_PACKET, bind by name.
3147              */
3148             fd_trans_register(ret, &target_packet_trans);
3149         } else if (domain == PF_NETLINK) {
3150             switch (protocol) {
3151 #ifdef CONFIG_RTNETLINK
3152             case NETLINK_ROUTE:
3153                 fd_trans_register(ret, &target_netlink_route_trans);
3154                 break;
3155 #endif
3156             case NETLINK_KOBJECT_UEVENT:
3157                 /* nothing to do: messages are strings */
3158                 break;
3159             case NETLINK_AUDIT:
3160                 fd_trans_register(ret, &target_netlink_audit_trans);
3161                 break;
3162             default:
3163                 g_assert_not_reached();
3164             }
3165         }
3166     }
3167     return ret;
3168 }
3169 
3170 /* do_bind() Must return target values and target errnos. */
3171 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3172                         socklen_t addrlen)
3173 {
3174     void *addr;
3175     abi_long ret;
3176 
3177     if ((int)addrlen < 0) {
3178         return -TARGET_EINVAL;
3179     }
3180 
3181     addr = alloca(addrlen+1);
3182 
3183     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3184     if (ret)
3185         return ret;
3186 
3187     return get_errno(bind(sockfd, addr, addrlen));
3188 }
3189 
3190 /* do_connect() Must return target values and target errnos. */
3191 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3192                            socklen_t addrlen)
3193 {
3194     void *addr;
3195     abi_long ret;
3196 
3197     if ((int)addrlen < 0) {
3198         return -TARGET_EINVAL;
3199     }
3200 
3201     addr = alloca(addrlen+1);
3202 
3203     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3204     if (ret)
3205         return ret;
3206 
3207     return get_errno(safe_connect(sockfd, addr, addrlen));
3208 }
3209 
3210 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3211 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3212                                       int flags, int send)
3213 {
3214     abi_long ret, len;
3215     struct msghdr msg;
3216     abi_ulong count;
3217     struct iovec *vec;
3218     abi_ulong target_vec;
3219 
3220     if (msgp->msg_name) {
3221         msg.msg_namelen = tswap32(msgp->msg_namelen);
3222         msg.msg_name = alloca(msg.msg_namelen+1);
3223         ret = target_to_host_sockaddr(fd, msg.msg_name,
3224                                       tswapal(msgp->msg_name),
3225                                       msg.msg_namelen);
3226         if (ret == -TARGET_EFAULT) {
3227             /* For connected sockets msg_name and msg_namelen must
3228              * be ignored, so returning EFAULT immediately is wrong.
3229              * Instead, pass a bad msg_name to the host kernel, and
3230              * let it decide whether to return EFAULT or not.
3231              */
3232             msg.msg_name = (void *)-1;
3233         } else if (ret) {
3234             goto out2;
3235         }
3236     } else {
3237         msg.msg_name = NULL;
3238         msg.msg_namelen = 0;
3239     }
3240     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3241     msg.msg_control = alloca(msg.msg_controllen);
3242     memset(msg.msg_control, 0, msg.msg_controllen);
3243 
3244     msg.msg_flags = tswap32(msgp->msg_flags);
3245 
3246     count = tswapal(msgp->msg_iovlen);
3247     target_vec = tswapal(msgp->msg_iov);
3248 
3249     if (count > IOV_MAX) {
3250         /* sendmsg/recvmsg return a different errno for this condition than
3251          * readv/writev, so we must catch it here before lock_iovec() does.
3252          */
3253         ret = -TARGET_EMSGSIZE;
3254         goto out2;
3255     }
3256 
3257     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3258                      target_vec, count, send);
3259     if (vec == NULL) {
3260         ret = -host_to_target_errno(errno);
3261         goto out2;
3262     }
3263     msg.msg_iovlen = count;
3264     msg.msg_iov = vec;
3265 
3266     if (send) {
3267         if (fd_trans_target_to_host_data(fd)) {
3268             void *host_msg;
3269 
3270             host_msg = g_malloc(msg.msg_iov->iov_len);
3271             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3272             ret = fd_trans_target_to_host_data(fd)(host_msg,
3273                                                    msg.msg_iov->iov_len);
3274             if (ret >= 0) {
3275                 msg.msg_iov->iov_base = host_msg;
3276                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3277             }
3278             g_free(host_msg);
3279         } else {
3280             ret = target_to_host_cmsg(&msg, msgp);
3281             if (ret == 0) {
3282                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3283             }
3284         }
3285     } else {
3286         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3287         if (!is_error(ret)) {
3288             len = ret;
3289             if (fd_trans_host_to_target_data(fd)) {
3290                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3291                                                MIN(msg.msg_iov->iov_len, len));
3292             } else {
3293                 ret = host_to_target_cmsg(msgp, &msg);
3294             }
3295             if (!is_error(ret)) {
3296                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3297                 msgp->msg_flags = tswap32(msg.msg_flags);
3298                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3299                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3300                                     msg.msg_name, msg.msg_namelen);
3301                     if (ret) {
3302                         goto out;
3303                     }
3304                 }
3305 
3306                 ret = len;
3307             }
3308         }
3309     }
3310 
3311 out:
3312     unlock_iovec(vec, target_vec, count, !send);
3313 out2:
3314     return ret;
3315 }
3316 
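     /*
      * do_sendrecvmsg() must return target values and target errnos.
      * It locks the target msghdr (read-only for send, writable for
      * receive) and defers the real work to do_sendrecvmsg_locked().
      */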
3317 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3318                                int flags, int send)
3319 {
3320     abi_long ret;
3321     struct target_msghdr *msgp;
3322 
3323     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3324                           msgp,
3325                           target_msg,
3326                           send ? 1 : 0)) {
3327         return -TARGET_EFAULT;
3328     }
3329     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3330     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3331     return ret;
3332 }
3333 
3334 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3335  * so it might not have this *mmsg-specific flag either.
3336  */
3337 #ifndef MSG_WAITFORONE
3338 #define MSG_WAITFORONE 0x10000
3339 #endif
3340 
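     /*
      * Emulate sendmmsg()/recvmmsg() by iterating over the message vector
      * with do_sendrecvmsg_locked().  Returns the number of messages
      * processed if any succeeded, otherwise the error from the first
      * failure.
      */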
3341 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3342                                 unsigned int vlen, unsigned int flags,
3343                                 int send)
3344 {
3345     struct target_mmsghdr *mmsgp;
3346     abi_long ret = 0;
3347     int i;
3348 
3349     if (vlen > UIO_MAXIOV) {
3350         vlen = UIO_MAXIOV;
3351     }
3352 
3353     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3354     if (!mmsgp) {
3355         return -TARGET_EFAULT;
3356     }
3357 
3358     for (i = 0; i < vlen; i++) {
3359         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3360         if (is_error(ret)) {
3361             break;
3362         }
3363         mmsgp[i].msg_len = tswap32(ret);
3364         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3365         if (flags & MSG_WAITFORONE) {
3366             flags |= MSG_DONTWAIT;
3367         }
3368     }
3369 
3370     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3371 
3372     /* Return the number of datagrams sent or received if we handled
3373      * any at all; otherwise return the error.
3374      */
3375     if (i) {
3376         return i;
3377     }
3378     return ret;
3379 }
3380 
3381 /* do_accept4() Must return target values and target errnos. */
3382 static abi_long do_accept4(int fd, abi_ulong target_addr,
3383                            abi_ulong target_addrlen_addr, int flags)
3384 {
3385     socklen_t addrlen, ret_addrlen;
3386     void *addr;
3387     abi_long ret;
3388     int host_flags;
3389 
3390     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3391 
3392     if (target_addr == 0) {
3393         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3394     }
3395 
3396     /* Linux returns EFAULT if the addrlen pointer is invalid */
3397     if (get_user_u32(addrlen, target_addrlen_addr))
3398         return -TARGET_EFAULT;
3399 
3400     if ((int)addrlen < 0) {
3401         return -TARGET_EINVAL;
3402     }
3403 
3404     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3405         return -TARGET_EFAULT;
3406     }
3407 
3408     addr = alloca(addrlen);
3409 
3410     ret_addrlen = addrlen;
3411     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3412     if (!is_error(ret)) {
3413         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3414         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3415             ret = -TARGET_EFAULT;
3416         }
3417     }
3418     return ret;
3419 }
3420 
3421 /* do_getpeername() Must return target values and target errnos. */
3422 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3423                                abi_ulong target_addrlen_addr)
3424 {
3425     socklen_t addrlen, ret_addrlen;
3426     void *addr;
3427     abi_long ret;
3428 
3429     if (get_user_u32(addrlen, target_addrlen_addr))
3430         return -TARGET_EFAULT;
3431 
3432     if ((int)addrlen < 0) {
3433         return -TARGET_EINVAL;
3434     }
3435 
3436     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3437         return -TARGET_EFAULT;
3438     }
3439 
3440     addr = alloca(addrlen);
3441 
3442     ret_addrlen = addrlen;
3443     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3444     if (!is_error(ret)) {
3445         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3446         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3447             ret = -TARGET_EFAULT;
3448         }
3449     }
3450     return ret;
3451 }
3452 
3453 /* do_getsockname() Must return target values and target errnos. */
3454 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3455                                abi_ulong target_addrlen_addr)
3456 {
3457     socklen_t addrlen, ret_addrlen;
3458     void *addr;
3459     abi_long ret;
3460 
3461     if (get_user_u32(addrlen, target_addrlen_addr))
3462         return -TARGET_EFAULT;
3463 
3464     if ((int)addrlen < 0) {
3465         return -TARGET_EINVAL;
3466     }
3467 
3468     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3469         return -TARGET_EFAULT;
3470     }
3471 
3472     addr = alloca(addrlen);
3473 
3474     ret_addrlen = addrlen;
3475     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3476     if (!is_error(ret)) {
3477         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3478         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3479             ret = -TARGET_EFAULT;
3480         }
3481     }
3482     return ret;
3483 }
3484 
3485 /* do_socketpair() Must return target values and target errnos. */
3486 static abi_long do_socketpair(int domain, int type, int protocol,
3487                               abi_ulong target_tab_addr)
3488 {
3489     int tab[2];
3490     abi_long ret;
3491 
3492     target_to_host_sock_type(&type);
3493 
3494     ret = get_errno(socketpair(domain, type, protocol, tab));
3495     if (!is_error(ret)) {
3496         if (put_user_s32(tab[0], target_tab_addr)
3497             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3498             ret = -TARGET_EFAULT;
3499     }
3500     return ret;
3501 }
3502 
3503 /* do_sendto() Must return target values and target errnos. */
3504 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3505                           abi_ulong target_addr, socklen_t addrlen)
3506 {
3507     void *addr;
3508     void *host_msg;
3509     void *copy_msg = NULL;
3510     abi_long ret;
3511 
3512     if ((int)addrlen < 0) {
3513         return -TARGET_EINVAL;
3514     }
3515 
3516     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3517     if (!host_msg)
3518         return -TARGET_EFAULT;
3519     if (fd_trans_target_to_host_data(fd)) {
3520         copy_msg = host_msg;
3521         host_msg = g_malloc(len);
3522         memcpy(host_msg, copy_msg, len);
3523         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3524         if (ret < 0) {
3525             goto fail;
3526         }
3527     }
3528     if (target_addr) {
3529         addr = alloca(addrlen+1);
3530         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3531         if (ret) {
3532             goto fail;
3533         }
3534         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3535     } else {
3536         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3537     }
3538 fail:
3539     if (copy_msg) {
3540         g_free(host_msg);
3541         host_msg = copy_msg;
3542     }
3543     unlock_user(host_msg, msg, 0);
3544     return ret;
3545 }
3546 
3547 /* do_recvfrom() Must return target values and target errnos. */
3548 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3549                             abi_ulong target_addr,
3550                             abi_ulong target_addrlen)
3551 {
3552     socklen_t addrlen, ret_addrlen;
3553     void *addr;
3554     void *host_msg;
3555     abi_long ret;
3556 
3557     if (!msg) {
3558         host_msg = NULL;
3559     } else {
3560         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3561         if (!host_msg) {
3562             return -TARGET_EFAULT;
3563         }
3564     }
3565     if (target_addr) {
3566         if (get_user_u32(addrlen, target_addrlen)) {
3567             ret = -TARGET_EFAULT;
3568             goto fail;
3569         }
3570         if ((int)addrlen < 0) {
3571             ret = -TARGET_EINVAL;
3572             goto fail;
3573         }
3574         addr = alloca(addrlen);
3575         ret_addrlen = addrlen;
3576         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3577                                       addr, &ret_addrlen));
3578     } else {
3579         addr = NULL; /* To keep compiler quiet.  */
3580         addrlen = 0; /* To keep compiler quiet.  */
3581         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3582     }
3583     if (!is_error(ret)) {
3584         if (fd_trans_host_to_target_data(fd)) {
3585             abi_long trans;
3586             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3587             if (is_error(trans)) {
3588                 ret = trans;
3589                 goto fail;
3590             }
3591         }
3592         if (target_addr) {
3593             host_to_target_sockaddr(target_addr, addr,
3594                                     MIN(addrlen, ret_addrlen));
3595             if (put_user_u32(ret_addrlen, target_addrlen)) {
3596                 ret = -TARGET_EFAULT;
3597                 goto fail;
3598             }
3599         }
3600         unlock_user(host_msg, msg, len);
3601     } else {
3602 fail:
3603         unlock_user(host_msg, msg, 0);
3604     }
3605     return ret;
3606 }
3607 
3608 #ifdef TARGET_NR_socketcall
3609 /* do_socketcall() must return target values and target errnos. */
3610 static abi_long do_socketcall(int num, abi_ulong vptr)
3611 {
3612     static const unsigned nargs[] = { /* number of arguments per operation */
3613         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3614         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3615         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3616         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3617         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3618         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3619         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3620         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3621         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3622         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3623         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3624         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3625         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3626         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3627         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3628         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3629         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3630         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3631         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3632         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3633     };
3634     abi_long a[6]; /* max 6 args */
3635     unsigned i;
3636 
3637     /* check the range of the first argument num */
3638     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3639     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3640         return -TARGET_EINVAL;
3641     }
3642     /* ensure we have space for args */
3643     if (nargs[num] > ARRAY_SIZE(a)) {
3644         return -TARGET_EINVAL;
3645     }
3646     /* collect the arguments in a[] according to nargs[] */
3647     for (i = 0; i < nargs[num]; ++i) {
3648         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3649             return -TARGET_EFAULT;
3650         }
3651     }
3652     /* now that we have the args, invoke the appropriate underlying function */
3653     switch (num) {
3654     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3655         return do_socket(a[0], a[1], a[2]);
3656     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3657         return do_bind(a[0], a[1], a[2]);
3658     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3659         return do_connect(a[0], a[1], a[2]);
3660     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3661         return get_errno(listen(a[0], a[1]));
3662     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3663         return do_accept4(a[0], a[1], a[2], 0);
3664     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3665         return do_getsockname(a[0], a[1], a[2]);
3666     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3667         return do_getpeername(a[0], a[1], a[2]);
3668     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3669         return do_socketpair(a[0], a[1], a[2], a[3]);
3670     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3671         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3672     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3673         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3674     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3675         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3676     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3677         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3678     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3679         return get_errno(shutdown(a[0], a[1]));
3680     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3681         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3682     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3683         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3684     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3685         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3686     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3687         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3688     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3689         return do_accept4(a[0], a[1], a[2], a[3]);
3690     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3691         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3692     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3693         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3694     default:
3695         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3696         return -TARGET_EINVAL;
3697     }
3698 }
3699 #endif
3700 
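     /*
      * Book-keeping for shmat() mappings, so that do_shmdt() can find the
      * size of a segment and clear its guest page flags on detach.
      */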
3701 #define N_SHM_REGIONS	32
3702 
3703 static struct shm_region {
3704     abi_ulong start;
3705     abi_ulong size;
3706     bool in_use;
3707 } shm_regions[N_SHM_REGIONS];
3708 
3709 #ifndef TARGET_SEMID64_DS
3710 /* asm-generic version of this struct */
3711 struct target_semid64_ds
3712 {
3713   struct target_ipc_perm sem_perm;
3714   abi_ulong sem_otime;
3715 #if TARGET_ABI_BITS == 32
3716   abi_ulong __unused1;
3717 #endif
3718   abi_ulong sem_ctime;
3719 #if TARGET_ABI_BITS == 32
3720   abi_ulong __unused2;
3721 #endif
3722   abi_ulong sem_nsems;
3723   abi_ulong __unused3;
3724   abi_ulong __unused4;
3725 };
3726 #endif
3727 
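     /*
      * Copy the ipc_perm member of a target semid64_ds from guest memory
      * into a host struct ipc_perm, byte-swapping each field.
      */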
3728 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3729                                                abi_ulong target_addr)
3730 {
3731     struct target_ipc_perm *target_ip;
3732     struct target_semid64_ds *target_sd;
3733 
3734     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3735         return -TARGET_EFAULT;
3736     target_ip = &(target_sd->sem_perm);
3737     host_ip->__key = tswap32(target_ip->__key);
3738     host_ip->uid = tswap32(target_ip->uid);
3739     host_ip->gid = tswap32(target_ip->gid);
3740     host_ip->cuid = tswap32(target_ip->cuid);
3741     host_ip->cgid = tswap32(target_ip->cgid);
3742 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3743     host_ip->mode = tswap32(target_ip->mode);
3744 #else
3745     host_ip->mode = tswap16(target_ip->mode);
3746 #endif
3747 #if defined(TARGET_PPC)
3748     host_ip->__seq = tswap32(target_ip->__seq);
3749 #else
3750     host_ip->__seq = tswap16(target_ip->__seq);
3751 #endif
3752     unlock_user_struct(target_sd, target_addr, 0);
3753     return 0;
3754 }
3755 
3756 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3757                                                struct ipc_perm *host_ip)
3758 {
3759     struct target_ipc_perm *target_ip;
3760     struct target_semid64_ds *target_sd;
3761 
3762     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3763         return -TARGET_EFAULT;
3764     target_ip = &(target_sd->sem_perm);
3765     target_ip->__key = tswap32(host_ip->__key);
3766     target_ip->uid = tswap32(host_ip->uid);
3767     target_ip->gid = tswap32(host_ip->gid);
3768     target_ip->cuid = tswap32(host_ip->cuid);
3769     target_ip->cgid = tswap32(host_ip->cgid);
3770 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3771     target_ip->mode = tswap32(host_ip->mode);
3772 #else
3773     target_ip->mode = tswap16(host_ip->mode);
3774 #endif
3775 #if defined(TARGET_PPC)
3776     target_ip->__seq = tswap32(host_ip->__seq);
3777 #else
3778     target_ip->__seq = tswap16(host_ip->__seq);
3779 #endif
3780     unlock_user_struct(target_sd, target_addr, 1);
3781     return 0;
3782 }
3783 
3784 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3785                                                abi_ulong target_addr)
3786 {
3787     struct target_semid64_ds *target_sd;
3788 
3789     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3790         return -TARGET_EFAULT;
3791     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3792         return -TARGET_EFAULT;
3793     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3794     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3795     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3796     unlock_user_struct(target_sd, target_addr, 0);
3797     return 0;
3798 }
3799 
3800 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3801                                                struct semid_ds *host_sd)
3802 {
3803     struct target_semid64_ds *target_sd;
3804 
3805     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3806         return -TARGET_EFAULT;
3807     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3808         return -TARGET_EFAULT;
3809     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3810     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3811     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3812     unlock_user_struct(target_sd, target_addr, 1);
3813     return 0;
3814 }
3815 
3816 struct target_seminfo {
3817     int semmap;
3818     int semmni;
3819     int semmns;
3820     int semmnu;
3821     int semmsl;
3822     int semopm;
3823     int semume;
3824     int semusz;
3825     int semvmx;
3826     int semaem;
3827 };
3828 
3829 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3830                                               struct seminfo *host_seminfo)
3831 {
3832     struct target_seminfo *target_seminfo;
3833     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3836     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3837     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3838     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3839     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3840     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3841     __put_user(host_seminfo->semume, &target_seminfo->semume);
3842     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3843     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3844     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3845     unlock_user_struct(target_seminfo, target_addr, 1);
3846     return 0;
3847 }
3848 
3849 union semun {
3850     int val;
3851     struct semid_ds *buf;
3852     unsigned short *array;
3853     struct seminfo *__buf;
3854 };
3855 
3856 union target_semun {
3857     int val;
3858     abi_ulong buf;
3859     abi_ulong array;
3860     abi_ulong __buf;
3861 };
3862 
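     /*
      * For GETALL/SETALL: query the number of semaphores with IPC_STAT,
      * then copy the target's array of unsigned shorts into a newly
      * allocated host array.  The caller owns *host_array on success.
      */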
3863 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3864                                                abi_ulong target_addr)
3865 {
3866     int nsems;
3867     unsigned short *array;
3868     union semun semun;
3869     struct semid_ds semid_ds;
3870     int i, ret;
3871 
3872     semun.buf = &semid_ds;
3873 
3874     ret = semctl(semid, 0, IPC_STAT, semun);
3875     if (ret == -1)
3876         return get_errno(ret);
3877 
3878     nsems = semid_ds.sem_nsems;
3879 
3880     *host_array = g_try_new(unsigned short, nsems);
3881     if (!*host_array) {
3882         return -TARGET_ENOMEM;
3883     }
3884     array = lock_user(VERIFY_READ, target_addr,
3885                       nsems*sizeof(unsigned short), 1);
3886     if (!array) {
3887         g_free(*host_array);
3888         return -TARGET_EFAULT;
3889     }
3890 
3891     for (i = 0; i < nsems; i++) {
3892         __get_user((*host_array)[i], &array[i]);
3893     }
3894     unlock_user(array, target_addr, 0);
3895 
3896     return 0;
3897 }
3898 
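     /*
      * Counterpart of target_to_host_semarray(): copy the host semaphore
      * array back to guest memory and free it.
      */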
3899 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3900                                                unsigned short **host_array)
3901 {
3902     int nsems;
3903     unsigned short *array;
3904     union semun semun;
3905     struct semid_ds semid_ds;
3906     int i, ret;
3907 
3908     semun.buf = &semid_ds;
3909 
3910     ret = semctl(semid, 0, IPC_STAT, semun);
3911     if (ret == -1)
3912         return get_errno(ret);
3913 
3914     nsems = semid_ds.sem_nsems;
3915 
3916     array = lock_user(VERIFY_WRITE, target_addr,
3917                       nsems*sizeof(unsigned short), 0);
3918     if (!array)
3919         return -TARGET_EFAULT;
3920 
3921     for (i = 0; i < nsems; i++) {
3922         __put_user((*host_array)[i], &array[i]);
3923     }
3924     g_free(*host_array);
3925     unlock_user(array, target_addr, 1);
3926 
3927     return 0;
3928 }
3929 
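     /*
      * Emulate semctl(): unpack the target semun argument as required by
      * each command class, invoke the host semctl(), and copy any results
      * back to guest memory.
      */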
3930 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3931                                  abi_ulong target_arg)
3932 {
3933     union target_semun target_su = { .buf = target_arg };
3934     union semun arg;
3935     struct semid_ds dsarg;
3936     unsigned short *array = NULL;
3937     struct seminfo seminfo;
3938     abi_long ret = -TARGET_EINVAL;
3939     abi_long err;
3940     cmd &= 0xff;
3941 
3942     switch (cmd) {
3943         case GETVAL:
3944         case SETVAL:
3945             /* In 64 bit cross-endian situations, we will erroneously pick up
3946              * the wrong half of the union for the "val" element.  To rectify
3947              * this, the entire 8-byte structure is byteswapped, followed by
3948              * a swap of the 4 byte val field. In other cases, the data is
3949              * already in proper host byte order. */
3950             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3951                 target_su.buf = tswapal(target_su.buf);
3952                 arg.val = tswap32(target_su.val);
3953             } else {
3954                 arg.val = target_su.val;
3955             }
3956             ret = get_errno(semctl(semid, semnum, cmd, arg));
3957             break;
3958         case GETALL:
3959         case SETALL:
3960             err = target_to_host_semarray(semid, &array, target_su.array);
3961             if (err)
3962                 return err;
3963             arg.array = array;
3964             ret = get_errno(semctl(semid, semnum, cmd, arg));
3965             err = host_to_target_semarray(semid, target_su.array, &array);
3966             if (err)
3967                 return err;
3968             break;
3969         case IPC_STAT:
3970         case IPC_SET:
3971         case SEM_STAT:
3972             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3973             if (err)
3974                 return err;
3975             arg.buf = &dsarg;
3976             ret = get_errno(semctl(semid, semnum, cmd, arg));
3977             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3978             if (err)
3979                 return err;
3980             break;
3981         case IPC_INFO:
3982         case SEM_INFO:
3983             arg.__buf = &seminfo;
3984             ret = get_errno(semctl(semid, semnum, cmd, arg));
3985             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3986             if (err)
3987                 return err;
3988             break;
3989         case IPC_RMID:
3990         case GETPID:
3991         case GETNCNT:
3992         case GETZCNT:
3993             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3994             break;
3995     }
3996 
3997     return ret;
3998 }
3999 
4000 struct target_sembuf {
4001     unsigned short sem_num;
4002     short sem_op;
4003     short sem_flg;
4004 };
4005 
4006 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4007                                              abi_ulong target_addr,
4008                                              unsigned nsops)
4009 {
4010     struct target_sembuf *target_sembuf;
4011     int i;
4012 
4013     target_sembuf = lock_user(VERIFY_READ, target_addr,
4014                               nsops*sizeof(struct target_sembuf), 1);
4015     if (!target_sembuf)
4016         return -TARGET_EFAULT;
4017 
4018     for (i = 0; i < nsops; i++) {
4019         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4020         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4021         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4022     }
4023 
4024     unlock_user(target_sembuf, target_addr, 0);
4025 
4026     return 0;
4027 }
4028 
4029 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4030     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4031 
4032 /*
4033  * This macro is required to handle the s390 variant, which passes the
4034  * arguments in a different order than the default.
4035  */
4036 #ifdef __s390x__
4037 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4038   (__nsops), (__timeout), (__sops)
4039 #else
4040 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4041   (__nsops), 0, (__sops), (__timeout)
4042 #endif
4043 
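     /*
      * Emulate semop()/semtimedop(): convert the target sembuf array and
      * the optional timeout (in 32-bit or 64-bit target timespec format,
      * as selected by 'time64'), then issue semtimedop either directly or
      * through the ipc multiplexer syscall.
      */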
4044 static inline abi_long do_semtimedop(int semid,
4045                                      abi_long ptr,
4046                                      unsigned nsops,
4047                                      abi_long timeout, bool time64)
4048 {
4049     struct sembuf *sops;
4050     struct timespec ts, *pts = NULL;
4051     abi_long ret;
4052 
4053     if (timeout) {
4054         pts = &ts;
4055         if (time64) {
4056             if (target_to_host_timespec64(pts, timeout)) {
4057                 return -TARGET_EFAULT;
4058             }
4059         } else {
4060             if (target_to_host_timespec(pts, timeout)) {
4061                 return -TARGET_EFAULT;
4062             }
4063         }
4064     }
4065 
4066     if (nsops > TARGET_SEMOPM) {
4067         return -TARGET_E2BIG;
4068     }
4069 
4070     sops = g_new(struct sembuf, nsops);
4071 
4072     if (target_to_host_sembuf(sops, ptr, nsops)) {
4073         g_free(sops);
4074         return -TARGET_EFAULT;
4075     }
4076 
4077     ret = -TARGET_ENOSYS;
4078 #ifdef __NR_semtimedop
4079     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4080 #endif
4081 #ifdef __NR_ipc
4082     if (ret == -TARGET_ENOSYS) {
4083         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4084                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4085     }
4086 #endif
4087     g_free(sops);
4088     return ret;
4089 }
4090 #endif
4091 
4092 struct target_msqid_ds
4093 {
4094     struct target_ipc_perm msg_perm;
4095     abi_ulong msg_stime;
4096 #if TARGET_ABI_BITS == 32
4097     abi_ulong __unused1;
4098 #endif
4099     abi_ulong msg_rtime;
4100 #if TARGET_ABI_BITS == 32
4101     abi_ulong __unused2;
4102 #endif
4103     abi_ulong msg_ctime;
4104 #if TARGET_ABI_BITS == 32
4105     abi_ulong __unused3;
4106 #endif
4107     abi_ulong __msg_cbytes;
4108     abi_ulong msg_qnum;
4109     abi_ulong msg_qbytes;
4110     abi_ulong msg_lspid;
4111     abi_ulong msg_lrpid;
4112     abi_ulong __unused4;
4113     abi_ulong __unused5;
4114 };
4115 
4116 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4117                                                abi_ulong target_addr)
4118 {
4119     struct target_msqid_ds *target_md;
4120 
4121     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4122         return -TARGET_EFAULT;
4123     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4124         return -TARGET_EFAULT;
4125     host_md->msg_stime = tswapal(target_md->msg_stime);
4126     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4127     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4128     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4129     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4130     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4131     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4132     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4133     unlock_user_struct(target_md, target_addr, 0);
4134     return 0;
4135 }
4136 
4137 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4138                                                struct msqid_ds *host_md)
4139 {
4140     struct target_msqid_ds *target_md;
4141 
4142     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4143         return -TARGET_EFAULT;
4144     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4145         return -TARGET_EFAULT;
4146     target_md->msg_stime = tswapal(host_md->msg_stime);
4147     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4148     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4149     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4150     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4151     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4152     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4153     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4154     unlock_user_struct(target_md, target_addr, 1);
4155     return 0;
4156 }
4157 
4158 struct target_msginfo {
4159     int msgpool;
4160     int msgmap;
4161     int msgmax;
4162     int msgmnb;
4163     int msgmni;
4164     int msgssz;
4165     int msgtql;
4166     unsigned short int msgseg;
4167 };
4168 
4169 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4170                                               struct msginfo *host_msginfo)
4171 {
4172     struct target_msginfo *target_msginfo;
4173     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4174         return -TARGET_EFAULT;
4175     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4176     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4177     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4178     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4179     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4180     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4181     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4182     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4183     unlock_user_struct(target_msginfo, target_addr, 1);
4184     return 0;
4185 }
4186 
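     /*
      * Emulate msgctl(): convert the target msqid_ds or msginfo buffer as
      * needed for the given command around the host msgctl() call.
      */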
4187 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4188 {
4189     struct msqid_ds dsarg;
4190     struct msginfo msginfo;
4191     abi_long ret = -TARGET_EINVAL;
4192 
4193     cmd &= 0xff;
4194 
4195     switch (cmd) {
4196     case IPC_STAT:
4197     case IPC_SET:
4198     case MSG_STAT:
4199         if (target_to_host_msqid_ds(&dsarg,ptr))
4200             return -TARGET_EFAULT;
4201         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4202         if (host_to_target_msqid_ds(ptr,&dsarg))
4203             return -TARGET_EFAULT;
4204         break;
4205     case IPC_RMID:
4206         ret = get_errno(msgctl(msgid, cmd, NULL));
4207         break;
4208     case IPC_INFO:
4209     case MSG_INFO:
4210         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4211         if (host_to_target_msginfo(ptr, &msginfo))
4212             return -TARGET_EFAULT;
4213         break;
4214     }
4215 
4216     return ret;
4217 }
4218 
4219 struct target_msgbuf {
4220     abi_long mtype;
4221     char mtext[1];
4222 };
4223 
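     /*
      * Emulate msgsnd(): copy the target message into a host buffer and
      * submit it via msgsnd or, failing that, the ipc multiplexer syscall.
      */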
4224 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4225                                  ssize_t msgsz, int msgflg)
4226 {
4227     struct target_msgbuf *target_mb;
4228     struct msgbuf *host_mb;
4229     abi_long ret = 0;
4230 
4231     if (msgsz < 0) {
4232         return -TARGET_EINVAL;
4233     }
4234 
4235     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4236         return -TARGET_EFAULT;
4237     host_mb = g_try_malloc(msgsz + sizeof(long));
4238     if (!host_mb) {
4239         unlock_user_struct(target_mb, msgp, 0);
4240         return -TARGET_ENOMEM;
4241     }
4242     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4243     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4244     ret = -TARGET_ENOSYS;
4245 #ifdef __NR_msgsnd
4246     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4247 #endif
4248 #ifdef __NR_ipc
4249     if (ret == -TARGET_ENOSYS) {
4250 #ifdef __s390x__
4251         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4252                                  host_mb));
4253 #else
4254         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4255                                  host_mb, 0));
4256 #endif
4257     }
4258 #endif
4259     g_free(host_mb);
4260     unlock_user_struct(target_mb, msgp, 0);
4261 
4262     return ret;
4263 }
4264 
4265 #ifdef __NR_ipc
4266 #if defined(__sparc__)
4267 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4268 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4269 #elif defined(__s390x__)
4270 /* The s390 sys_ipc variant has only five parameters.  */
4271 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4272     ((long int[]){(long int)__msgp, __msgtyp})
4273 #else
4274 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4275     ((long int[]){(long int)__msgp, __msgtyp}), 0
4276 #endif
4277 #endif
4278 
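     /*
      * Emulate msgrcv(): receive into a host buffer via msgrcv or the ipc
      * multiplexer syscall, then copy the message type and text back to
      * the target msgbuf.
      */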
4279 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4280                                  ssize_t msgsz, abi_long msgtyp,
4281                                  int msgflg)
4282 {
4283     struct target_msgbuf *target_mb;
4284     char *target_mtext;
4285     struct msgbuf *host_mb;
4286     abi_long ret = 0;
4287 
4288     if (msgsz < 0) {
4289         return -TARGET_EINVAL;
4290     }
4291 
4292     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4293         return -TARGET_EFAULT;
4294 
4295     host_mb = g_try_malloc(msgsz + sizeof(long));
4296     if (!host_mb) {
4297         ret = -TARGET_ENOMEM;
4298         goto end;
4299     }
4300     ret = -TARGET_ENOSYS;
4301 #ifdef __NR_msgrcv
4302     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4303 #endif
4304 #ifdef __NR_ipc
4305     if (ret == -TARGET_ENOSYS) {
4306         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4307                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4308     }
4309 #endif
4310 
4311     if (ret > 0) {
4312         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4313         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4314         if (!target_mtext) {
4315             ret = -TARGET_EFAULT;
4316             goto end;
4317         }
4318         memcpy(target_mb->mtext, host_mb->mtext, ret);
4319         unlock_user(target_mtext, target_mtext_addr, ret);
4320     }
4321 
4322     target_mb->mtype = tswapal(host_mb->mtype);
4323 
4324 end:
4325     if (target_mb)
4326         unlock_user_struct(target_mb, msgp, 1);
4327     g_free(host_mb);
4328     return ret;
4329 }
4330 
4331 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4332                                                abi_ulong target_addr)
4333 {
4334     struct target_shmid_ds *target_sd;
4335 
4336     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4337         return -TARGET_EFAULT;
4338     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4339         return -TARGET_EFAULT;
4340     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4341     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4342     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4343     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4344     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4345     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4346     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4347     unlock_user_struct(target_sd, target_addr, 0);
4348     return 0;
4349 }
4350 
4351 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4352                                                struct shmid_ds *host_sd)
4353 {
4354     struct target_shmid_ds *target_sd;
4355 
4356     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4357         return -TARGET_EFAULT;
4358     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4359         return -TARGET_EFAULT;
4360     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4361     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4362     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4363     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4364     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4365     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4366     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4367     unlock_user_struct(target_sd, target_addr, 1);
4368     return 0;
4369 }
4370 
4371 struct  target_shminfo {
4372     abi_ulong shmmax;
4373     abi_ulong shmmin;
4374     abi_ulong shmmni;
4375     abi_ulong shmseg;
4376     abi_ulong shmall;
4377 };
4378 
4379 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4380                                               struct shminfo *host_shminfo)
4381 {
4382     struct target_shminfo *target_shminfo;
4383     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4384         return -TARGET_EFAULT;
4385     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4386     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4387     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4388     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4389     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4390     unlock_user_struct(target_shminfo, target_addr, 1);
4391     return 0;
4392 }
4393 
4394 struct target_shm_info {
4395     int used_ids;
4396     abi_ulong shm_tot;
4397     abi_ulong shm_rss;
4398     abi_ulong shm_swp;
4399     abi_ulong swap_attempts;
4400     abi_ulong swap_successes;
4401 };
4402 
4403 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4404                                                struct shm_info *host_shm_info)
4405 {
4406     struct target_shm_info *target_shm_info;
4407     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4408         return -TARGET_EFAULT;
4409     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4410     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4411     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4412     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4413     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4414     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4415     unlock_user_struct(target_shm_info, target_addr, 1);
4416     return 0;
4417 }
4418 
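     /*
      * Emulate shmctl(): convert shmid_ds, shminfo or shm_info buffers
      * between target and host layouts around the host shmctl() call.
      */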
4419 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4420 {
4421     struct shmid_ds dsarg;
4422     struct shminfo shminfo;
4423     struct shm_info shm_info;
4424     abi_long ret = -TARGET_EINVAL;
4425 
4426     cmd &= 0xff;
4427 
4428     switch (cmd) {
4429     case IPC_STAT:
4430     case IPC_SET:
4431     case SHM_STAT:
4432         if (target_to_host_shmid_ds(&dsarg, buf))
4433             return -TARGET_EFAULT;
4434         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4435         if (host_to_target_shmid_ds(buf, &dsarg))
4436             return -TARGET_EFAULT;
4437         break;
4438     case IPC_INFO:
4439         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4440         if (host_to_target_shminfo(buf, &shminfo))
4441             return -TARGET_EFAULT;
4442         break;
4443     case SHM_INFO:
4444         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4445         if (host_to_target_shm_info(buf, &shm_info))
4446             return -TARGET_EFAULT;
4447         break;
4448     case IPC_RMID:
4449     case SHM_LOCK:
4450     case SHM_UNLOCK:
4451         ret = get_errno(shmctl(shmid, cmd, NULL));
4452         break;
4453     }
4454 
4455     return ret;
4456 }
4457 
4458 #ifndef TARGET_FORCE_SHMLBA
4459 /* For most architectures, SHMLBA is the same as the page size;
4460  * some architectures have larger values, in which case they should
4461  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4462  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4463  * and defining its own value for SHMLBA.
4464  *
4465  * The kernel also permits SHMLBA to be set by the architecture to a
4466  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4467  * this means that addresses are rounded to the large size if
4468  * SHM_RND is set but addresses not aligned to that size are not rejected
4469  * as long as they are at least page-aligned. Since the only architecture
4470  * which uses this is ia64, this code doesn't provide for that oddity.
4471  */
4472 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4473 {
4474     return TARGET_PAGE_SIZE;
4475 }
4476 #endif
4477 
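     /*
      * Emulate shmat(): enforce the target SHMLBA alignment, attach the
      * segment with the host shmat() (choosing a guest address via
      * mmap_find_vma() when none is supplied), update the guest page
      * flags, and remember the mapping in shm_regions[] for do_shmdt().
      */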
4478 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4479                                  int shmid, abi_ulong shmaddr, int shmflg)
4480 {
4481     CPUState *cpu = env_cpu(cpu_env);
4482     abi_long raddr;
4483     void *host_raddr;
4484     struct shmid_ds shm_info;
4485     int i, ret;
4486     abi_ulong shmlba;
4487 
4488     /* shmat pointers are always untagged */
4489 
4490     /* find out the length of the shared memory segment */
4491     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4492     if (is_error(ret)) {
4493         /* can't get length, bail out */
4494         return ret;
4495     }
4496 
4497     shmlba = target_shmlba(cpu_env);
4498 
4499     if (shmaddr & (shmlba - 1)) {
4500         if (shmflg & SHM_RND) {
4501             shmaddr &= ~(shmlba - 1);
4502         } else {
4503             return -TARGET_EINVAL;
4504         }
4505     }
4506     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4507         return -TARGET_EINVAL;
4508     }
4509 
4510     mmap_lock();
4511 
4512     /*
4513      * We're mapping shared memory, so ensure we generate code for parallel
4514      * execution and flush old translations.  This will work up to the level
4515      * supported by the host -- anything that requires EXCP_ATOMIC will not
4516      * be atomic with respect to an external process.
4517      */
4518     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4519         cpu->tcg_cflags |= CF_PARALLEL;
4520         tb_flush(cpu);
4521     }
4522 
4523     if (shmaddr)
4524         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4525     else {
4526         abi_ulong mmap_start;
4527 
4528         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4529         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4530 
4531         if (mmap_start == -1) {
4532             errno = ENOMEM;
4533             host_raddr = (void *)-1;
4534         } else
4535             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4536                                shmflg | SHM_REMAP);
4537     }
4538 
4539     if (host_raddr == (void *)-1) {
4540         mmap_unlock();
4541         return get_errno((long)host_raddr);
4542     }
4543     raddr = h2g((unsigned long)host_raddr);
4544 
4545     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4546                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4547                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4548 
4549     for (i = 0; i < N_SHM_REGIONS; i++) {
4550         if (!shm_regions[i].in_use) {
4551             shm_regions[i].in_use = true;
4552             shm_regions[i].start = raddr;
4553             shm_regions[i].size = shm_info.shm_segsz;
4554             break;
4555         }
4556     }
4557 
4558     mmap_unlock();
4559     return raddr;
4560 
4561 }
4562 
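     /*
      * Emulate shmdt(): clear the guest page flags for the region recorded
      * by do_shmat() and detach it with the host shmdt().
      */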
4563 static inline abi_long do_shmdt(abi_ulong shmaddr)
4564 {
4565     int i;
4566     abi_long rv;
4567 
4568     /* shmdt pointers are always untagged */
4569 
4570     mmap_lock();
4571 
4572     for (i = 0; i < N_SHM_REGIONS; ++i) {
4573         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4574             shm_regions[i].in_use = false;
4575             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4576             break;
4577         }
4578     }
4579     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4580 
4581     mmap_unlock();
4582 
4583     return rv;
4584 }
4585 
4586 #ifdef TARGET_NR_ipc
4587 /* ??? This only works with linear mappings.  */
4588 /* do_ipc() must return target values and target errnos. */
4589 static abi_long do_ipc(CPUArchState *cpu_env,
4590                        unsigned int call, abi_long first,
4591                        abi_long second, abi_long third,
4592                        abi_long ptr, abi_long fifth)
4593 {
4594     int version;
4595     abi_long ret = 0;
4596 
4597     version = call >> 16;
4598     call &= 0xffff;
4599 
4600     switch (call) {
4601     case IPCOP_semop:
4602         ret = do_semtimedop(first, ptr, second, 0, false);
4603         break;
4604     case IPCOP_semtimedop:
4605     /*
4606      * The s390 sys_ipc variant has only five parameters instead of six
4607      * (as in the default variant); the only difference is the handling of
4608      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4609      * to a struct timespec whereas the generic variant uses the fifth one.
4610      */
4611 #if defined(TARGET_S390X)
4612         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4613 #else
4614         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4615 #endif
4616         break;
4617 
4618     case IPCOP_semget:
4619         ret = get_errno(semget(first, second, third));
4620         break;
4621 
4622     case IPCOP_semctl: {
4623         /* The semun argument to semctl is passed by value, so dereference the
4624          * ptr argument. */
4625         abi_ulong atptr;
4626         get_user_ual(atptr, ptr);
4627         ret = do_semctl(first, second, third, atptr);
4628         break;
4629     }
4630 
4631     case IPCOP_msgget:
4632         ret = get_errno(msgget(first, second));
4633         break;
4634 
4635     case IPCOP_msgsnd:
4636         ret = do_msgsnd(first, ptr, second, third);
4637         break;
4638 
4639     case IPCOP_msgctl:
4640         ret = do_msgctl(first, second, ptr);
4641         break;
4642 
4643     case IPCOP_msgrcv:
4644         switch (version) {
4645         case 0:
4646             {
4647                 struct target_ipc_kludge {
4648                     abi_long msgp;
4649                     abi_long msgtyp;
4650                 } *tmp;
4651 
4652                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4653                     ret = -TARGET_EFAULT;
4654                     break;
4655                 }
4656 
4657                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4658 
4659                 unlock_user_struct(tmp, ptr, 0);
4660                 break;
4661             }
4662         default:
4663             ret = do_msgrcv(first, ptr, second, fifth, third);
4664         }
4665         break;
4666 
4667     case IPCOP_shmat:
4668         switch (version) {
4669         default:
4670         {
4671             abi_ulong raddr;
4672             raddr = do_shmat(cpu_env, first, ptr, second);
4673             if (is_error(raddr))
4674                 return get_errno(raddr);
4675             if (put_user_ual(raddr, third))
4676                 return -TARGET_EFAULT;
4677             break;
4678         }
4679         case 1:
4680             ret = -TARGET_EINVAL;
4681             break;
4682         }
4683         break;
4684     case IPCOP_shmdt:
4685         ret = do_shmdt(ptr);
4686         break;
4687 
4688     case IPCOP_shmget:
4689         /* IPC_* flag values are the same on all Linux platforms */
4690         ret = get_errno(shmget(first, second, third));
4691         break;
4692 
4693     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4694     case IPCOP_shmctl:
4695         ret = do_shmctl(first, second, ptr);
4696         break;
4697     default:
4698         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4699                       call, version);
4700         ret = -TARGET_ENOSYS;
4701         break;
4702     }
4703     return ret;
4704 }
4705 #endif
4706 
4707 /* kernel structure types definitions */
4708 
4709 #define STRUCT(name, ...) STRUCT_ ## name,
4710 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4711 enum {
4712 #include "syscall_types.h"
4713 STRUCT_MAX
4714 };
4715 #undef STRUCT
4716 #undef STRUCT_SPECIAL
4717 
4718 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4719 #define STRUCT_SPECIAL(name)
4720 #include "syscall_types.h"
4721 #undef STRUCT
4722 #undef STRUCT_SPECIAL
4723 
4724 #define MAX_STRUCT_SIZE 4096
4725 
4726 #ifdef CONFIG_FIEMAP
4727 /* So fiemap access checks don't overflow on 32 bit systems.
4728  * This is very slightly smaller than the limit imposed by
4729  * the underlying kernel.
4730  */
4731 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4732                             / sizeof(struct fiemap_extent))
4733 
4734 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4735                                        int fd, int cmd, abi_long arg)
4736 {
4737     /* The parameter for this ioctl is a struct fiemap followed
4738      * by an array of struct fiemap_extent whose size is set
4739      * in fiemap->fm_extent_count. The array is filled in by the
4740      * ioctl.
4741      */
4742     int target_size_in, target_size_out;
4743     struct fiemap *fm;
4744     const argtype *arg_type = ie->arg_type;
4745     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4746     void *argptr, *p;
4747     abi_long ret;
4748     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4749     uint32_t outbufsz;
4750     int free_fm = 0;
4751 
4752     assert(arg_type[0] == TYPE_PTR);
4753     assert(ie->access == IOC_RW);
4754     arg_type++;
4755     target_size_in = thunk_type_size(arg_type, 0);
4756     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4757     if (!argptr) {
4758         return -TARGET_EFAULT;
4759     }
4760     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4761     unlock_user(argptr, arg, 0);
4762     fm = (struct fiemap *)buf_temp;
4763     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4764         return -TARGET_EINVAL;
4765     }
4766 
4767     outbufsz = sizeof (*fm) +
4768         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4769 
4770     if (outbufsz > MAX_STRUCT_SIZE) {
4771         /* We can't fit all the extents into the fixed size buffer.
4772          * Allocate one that is large enough and use it instead.
4773          */
4774         fm = g_try_malloc(outbufsz);
4775         if (!fm) {
4776             return -TARGET_ENOMEM;
4777         }
4778         memcpy(fm, buf_temp, sizeof(struct fiemap));
4779         free_fm = 1;
4780     }
4781     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4782     if (!is_error(ret)) {
4783         target_size_out = target_size_in;
4784         /* An extent_count of 0 means we were only counting the extents
4785          * so there are no structs to copy
4786          */
4787         if (fm->fm_extent_count != 0) {
4788             target_size_out += fm->fm_mapped_extents * extent_size;
4789         }
4790         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4791         if (!argptr) {
4792             ret = -TARGET_EFAULT;
4793         } else {
4794             /* Convert the struct fiemap */
4795             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4796             if (fm->fm_extent_count != 0) {
4797                 p = argptr + target_size_in;
4798                 /* ...and then all the struct fiemap_extents */
4799                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4800                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4801                                   THUNK_TARGET);
4802                     p += extent_size;
4803                 }
4804             }
4805             unlock_user(argptr, arg, target_size_out);
4806         }
4807     }
4808     if (free_fm) {
4809         g_free(fm);
4810     }
4811     return ret;
4812 }
4813 #endif
4814 
4815 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4816                                 int fd, int cmd, abi_long arg)
4817 {
4818     const argtype *arg_type = ie->arg_type;
4819     int target_size;
4820     void *argptr;
4821     int ret;
4822     struct ifconf *host_ifconf;
4823     uint32_t outbufsz;
4824     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4825     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4826     int target_ifreq_size;
4827     int nb_ifreq;
4828     int free_buf = 0;
4829     int i;
4830     int target_ifc_len;
4831     abi_long target_ifc_buf;
4832     int host_ifc_len;
4833     char *host_ifc_buf;
4834 
4835     assert(arg_type[0] == TYPE_PTR);
4836     assert(ie->access == IOC_RW);
4837 
4838     arg_type++;
4839     target_size = thunk_type_size(arg_type, 0);
4840 
4841     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4842     if (!argptr)
4843         return -TARGET_EFAULT;
4844     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4845     unlock_user(argptr, arg, 0);
4846 
4847     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4848     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4849     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4850 
4851     if (target_ifc_buf != 0) {
4852         target_ifc_len = host_ifconf->ifc_len;
4853         nb_ifreq = target_ifc_len / target_ifreq_size;
4854         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4855 
4856         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4857         if (outbufsz > MAX_STRUCT_SIZE) {
4858             /*
4859              * We can't fit all the ifreq entries into the fixed size buffer.
4860              * Allocate one that is large enough and use it instead.
4861              */
4862             host_ifconf = malloc(outbufsz);
4863             if (!host_ifconf) {
4864                 return -TARGET_ENOMEM;
4865             }
4866             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4867             free_buf = 1;
4868         }
4869         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4870 
4871         host_ifconf->ifc_len = host_ifc_len;
4872     } else {
4873         host_ifc_buf = NULL;
4874     }
4875     host_ifconf->ifc_buf = host_ifc_buf;
4876 
4877     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4878     if (!is_error(ret)) {
4879         /* convert host ifc_len to target ifc_len */
4880 
4881         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4882         target_ifc_len = nb_ifreq * target_ifreq_size;
4883         host_ifconf->ifc_len = target_ifc_len;
4884 
4885         /* restore target ifc_buf */
4886 
4887         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4888 
4889         /* copy struct ifconf to target user */
4890 
4891         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4892         if (!argptr)
4893             return -TARGET_EFAULT;
4894         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4895         unlock_user(argptr, arg, target_size);
4896 
4897         if (target_ifc_buf != 0) {
4898             /* copy ifreq[] to target user */
4899             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4900             for (i = 0; i < nb_ifreq ; i++) {
4901                 thunk_convert(argptr + i * target_ifreq_size,
4902                               host_ifc_buf + i * sizeof(struct ifreq),
4903                               ifreq_arg_type, THUNK_TARGET);
4904             }
4905             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4906         }
4907     }
4908 
4909     if (free_buf) {
4910         free(host_ifconf);
4911     }
4912 
4913     return ret;
4914 }
4915 
4916 #if defined(CONFIG_USBFS)
4917 #if HOST_LONG_BITS > 64
4918 #error USBDEVFS thunks do not support >64 bit hosts yet.
4919 #endif
4920 struct live_urb {
4921     uint64_t target_urb_adr;
4922     uint64_t target_buf_adr;
4923     char *target_buf_ptr;
4924     struct usbdevfs_urb host_urb;
4925 };
4926 
4927 static GHashTable *usbdevfs_urb_hashtable(void)
4928 {
4929     static GHashTable *urb_hashtable;
4930 
4931     if (!urb_hashtable) {
4932         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4933     }
4934     return urb_hashtable;
4935 }
4936 
4937 static void urb_hashtable_insert(struct live_urb *urb)
4938 {
4939     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4940     g_hash_table_insert(urb_hashtable, urb, urb);
4941 }
4942 
4943 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4944 {
4945     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4946     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4947 }
4948 
4949 static void urb_hashtable_remove(struct live_urb *urb)
4950 {
4951     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4952     g_hash_table_remove(urb_hashtable, urb);
4953 }
4954 
4955 static abi_long
4956 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4957                           int fd, int cmd, abi_long arg)
4958 {
4959     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4960     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4961     struct live_urb *lurb;
4962     void *argptr;
4963     uint64_t hurb;
4964     int target_size;
4965     uintptr_t target_urb_adr;
4966     abi_long ret;
4967 
4968     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4969 
4970     memset(buf_temp, 0, sizeof(uint64_t));
4971     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4972     if (is_error(ret)) {
4973         return ret;
4974     }
4975 
4976     memcpy(&hurb, buf_temp, sizeof(uint64_t));
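    /*
     * The kernel returns a pointer to our embedded host_urb; step back by
     * its offset to recover the enclosing live_urb (container_of-style).
     */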
4977     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4978     if (!lurb->target_urb_adr) {
4979         return -TARGET_EFAULT;
4980     }
4981     urb_hashtable_remove(lurb);
4982     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4983         lurb->host_urb.buffer_length);
4984     lurb->target_buf_ptr = NULL;
4985 
4986     /* restore the guest buffer pointer */
4987     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4988 
4989     /* update the guest urb struct */
4990     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4991     if (!argptr) {
4992         g_free(lurb);
4993         return -TARGET_EFAULT;
4994     }
4995     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4996     unlock_user(argptr, lurb->target_urb_adr, target_size);
4997 
4998     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4999     /* write back the urb handle */
5000     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5001     if (!argptr) {
5002         g_free(lurb);
5003         return -TARGET_EFAULT;
5004     }
5005 
5006     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5007     target_urb_adr = lurb->target_urb_adr;
5008     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5009     unlock_user(argptr, arg, target_size);
5010 
5011     g_free(lurb);
5012     return ret;
5013 }
5014 
5015 static abi_long
5016 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5017                              uint8_t *buf_temp __attribute__((unused)),
5018                              int fd, int cmd, abi_long arg)
5019 {
5020     struct live_urb *lurb;
5021 
5022     /* map target address back to host URB with metadata. */
5023     lurb = urb_hashtable_lookup(arg);
5024     if (!lurb) {
5025         return -TARGET_EFAULT;
5026     }
5027     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5028 }
5029 
5030 static abi_long
5031 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5032                             int fd, int cmd, abi_long arg)
5033 {
5034     const argtype *arg_type = ie->arg_type;
5035     int target_size;
5036     abi_long ret;
5037     void *argptr;
5038     int rw_dir;
5039     struct live_urb *lurb;
5040 
5041     /*
5042      * Each submitted URB needs to map to a unique ID for the
5043      * kernel, and that unique ID needs to be a pointer to
5044      * host memory.  Hence, we need to malloc for each URB.
5045      * Isochronous transfers have a variable-length struct.
5046      */
5047     arg_type++;
5048     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5049 
5050     /* construct host copy of urb and metadata */
5051     lurb = g_try_malloc0(sizeof(struct live_urb));
5052     if (!lurb) {
5053         return -TARGET_ENOMEM;
5054     }
5055 
5056     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5057     if (!argptr) {
5058         g_free(lurb);
5059         return -TARGET_EFAULT;
5060     }
5061     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5062     unlock_user(argptr, arg, 0);
5063 
5064     lurb->target_urb_adr = arg;
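    /* Record the guest urb and buffer addresses so reapurb can write back. */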
5065     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5066 
5067     /* buffer space used depends on endpoint type so lock the entire buffer */
5068     /* control type urbs should check the buffer contents for true direction */
5069     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5070     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5071         lurb->host_urb.buffer_length, 1);
5072     if (lurb->target_buf_ptr == NULL) {
5073         g_free(lurb);
5074         return -TARGET_EFAULT;
5075     }
5076 
5077     /* update buffer pointer in host copy */
5078     lurb->host_urb.buffer = lurb->target_buf_ptr;
5079 
5080     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5081     if (is_error(ret)) {
5082         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5083         g_free(lurb);
5084     } else {
5085         urb_hashtable_insert(lurb);
5086     }
5087 
5088     return ret;
5089 }
5090 #endif /* CONFIG_USBFS */
5091 
5092 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5093                             int cmd, abi_long arg)
5094 {
5095     void *argptr;
5096     struct dm_ioctl *host_dm;
5097     abi_long guest_data;
5098     uint32_t guest_data_size;
5099     int target_size;
5100     const argtype *arg_type = ie->arg_type;
5101     abi_long ret;
5102     void *big_buf = NULL;
5103     char *host_data;
5104 
5105     arg_type++;
5106     target_size = thunk_type_size(arg_type, 0);
5107     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5108     if (!argptr) {
5109         ret = -TARGET_EFAULT;
5110         goto out;
5111     }
5112     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5113     unlock_user(argptr, arg, 0);
5114 
5115     /* buf_temp is too small, so fetch things into a bigger buffer */
5116     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5117     memcpy(big_buf, buf_temp, target_size);
5118     buf_temp = big_buf;
5119     host_dm = big_buf;
5120 
5121     guest_data = arg + host_dm->data_start;
5122     if ((guest_data - arg) < 0) {
5123         ret = -TARGET_EINVAL;
5124         goto out;
5125     }
5126     guest_data_size = host_dm->data_size - host_dm->data_start;
5127     host_data = (char*)host_dm + host_dm->data_start;
5128 
5129     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5130     if (!argptr) {
5131         ret = -TARGET_EFAULT;
5132         goto out;
5133     }
5134 
5135     switch (ie->host_cmd) {
5136     case DM_REMOVE_ALL:
5137     case DM_LIST_DEVICES:
5138     case DM_DEV_CREATE:
5139     case DM_DEV_REMOVE:
5140     case DM_DEV_SUSPEND:
5141     case DM_DEV_STATUS:
5142     case DM_DEV_WAIT:
5143     case DM_TABLE_STATUS:
5144     case DM_TABLE_CLEAR:
5145     case DM_TABLE_DEPS:
5146     case DM_LIST_VERSIONS:
5147         /* no input data */
5148         break;
5149     case DM_DEV_RENAME:
5150     case DM_DEV_SET_GEOMETRY:
5151         /* data contains only strings */
5152         memcpy(host_data, argptr, guest_data_size);
5153         break;
5154     case DM_TARGET_MSG:
5155         memcpy(host_data, argptr, guest_data_size);
5156         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5157         break;
5158     case DM_TABLE_LOAD:
5159     {
5160         void *gspec = argptr;
5161         void *cur_data = host_data;
5162         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5163         int spec_size = thunk_type_size(arg_type, 0);
5164         int i;
5165 
5166         for (i = 0; i < host_dm->target_count; i++) {
5167             struct dm_target_spec *spec = cur_data;
5168             uint32_t next;
5169             int slen;
5170 
5171             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5172             slen = strlen((char*)gspec + spec_size) + 1;
5173             next = spec->next;
5174             spec->next = sizeof(*spec) + slen;
5175             strcpy((char*)&spec[1], gspec + spec_size);
5176             gspec += next;
5177             cur_data += spec->next;
5178         }
5179         break;
5180     }
5181     default:
5182         ret = -TARGET_EINVAL;
5183         unlock_user(argptr, guest_data, 0);
5184         goto out;
5185     }
5186     unlock_user(argptr, guest_data, 0);
5187 
5188     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5189     if (!is_error(ret)) {
5190         guest_data = arg + host_dm->data_start;
5191         guest_data_size = host_dm->data_size - host_dm->data_start;
5192         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5193         switch (ie->host_cmd) {
5194         case DM_REMOVE_ALL:
5195         case DM_DEV_CREATE:
5196         case DM_DEV_REMOVE:
5197         case DM_DEV_RENAME:
5198         case DM_DEV_SUSPEND:
5199         case DM_DEV_STATUS:
5200         case DM_TABLE_LOAD:
5201         case DM_TABLE_CLEAR:
5202         case DM_TARGET_MSG:
5203         case DM_DEV_SET_GEOMETRY:
5204             /* no return data */
5205             break;
5206         case DM_LIST_DEVICES:
5207         {
5208             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5209             uint32_t remaining_data = guest_data_size;
5210             void *cur_data = argptr;
5211             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5212             int nl_size = 12; /* can't use thunk_size due to alignment */
5213 
5214             while (1) {
5215                 uint32_t next = nl->next;
5216                 if (next) {
5217                     nl->next = nl_size + (strlen(nl->name) + 1);
5218                 }
5219                 if (remaining_data < nl->next) {
5220                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5221                     break;
5222                 }
5223                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5224                 strcpy(cur_data + nl_size, nl->name);
5225                 cur_data += nl->next;
5226                 remaining_data -= nl->next;
5227                 if (!next) {
5228                     break;
5229                 }
5230                 nl = (void*)nl + next;
5231             }
5232             break;
5233         }
5234         case DM_DEV_WAIT:
5235         case DM_TABLE_STATUS:
5236         {
5237             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5238             void *cur_data = argptr;
5239             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5240             int spec_size = thunk_type_size(arg_type, 0);
5241             int i;
5242 
5243             for (i = 0; i < host_dm->target_count; i++) {
5244                 uint32_t next = spec->next;
5245                 int slen = strlen((char*)&spec[1]) + 1;
5246                 spec->next = (cur_data - argptr) + spec_size + slen;
5247                 if (guest_data_size < spec->next) {
5248                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5249                     break;
5250                 }
5251                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5252                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5253                 cur_data = argptr + spec->next;
5254                 spec = (void*)host_dm + host_dm->data_start + next;
5255             }
5256             break;
5257         }
5258         case DM_TABLE_DEPS:
5259         {
5260             void *hdata = (void*)host_dm + host_dm->data_start;
5261             int count = *(uint32_t*)hdata;
5262             uint64_t *hdev = hdata + 8;
5263             uint64_t *gdev = argptr + 8;
5264             int i;
5265 
5266             *(uint32_t*)argptr = tswap32(count);
5267             for (i = 0; i < count; i++) {
5268                 *gdev = tswap64(*hdev);
5269                 gdev++;
5270                 hdev++;
5271             }
5272             break;
5273         }
5274         case DM_LIST_VERSIONS:
5275         {
5276             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5277             uint32_t remaining_data = guest_data_size;
5278             void *cur_data = argptr;
5279             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5280             int vers_size = thunk_type_size(arg_type, 0);
5281 
5282             while (1) {
5283                 uint32_t next = vers->next;
5284                 if (next) {
5285                     vers->next = vers_size + (strlen(vers->name) + 1);
5286                 }
5287                 if (remaining_data < vers->next) {
5288                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5289                     break;
5290                 }
5291                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5292                 strcpy(cur_data + vers_size, vers->name);
5293                 cur_data += vers->next;
5294                 remaining_data -= vers->next;
5295                 if (!next) {
5296                     break;
5297                 }
5298                 vers = (void*)vers + next;
5299             }
5300             break;
5301         }
5302         default:
5303             unlock_user(argptr, guest_data, 0);
5304             ret = -TARGET_EINVAL;
5305             goto out;
5306         }
5307         unlock_user(argptr, guest_data, guest_data_size);
5308 
5309         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5310         if (!argptr) {
5311             ret = -TARGET_EFAULT;
5312             goto out;
5313         }
5314         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5315         unlock_user(argptr, arg, target_size);
5316     }
5317 out:
5318     g_free(big_buf);
5319     return ret;
5320 }
5321 
5322 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5323                                int cmd, abi_long arg)
5324 {
5325     void *argptr;
5326     int target_size;
5327     const argtype *arg_type = ie->arg_type;
5328     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5329     abi_long ret;
5330 
5331     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5332     struct blkpg_partition host_part;
5333 
5334     /* Read and convert blkpg */
5335     arg_type++;
5336     target_size = thunk_type_size(arg_type, 0);
5337     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5338     if (!argptr) {
5339         ret = -TARGET_EFAULT;
5340         goto out;
5341     }
5342     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5343     unlock_user(argptr, arg, 0);
5344 
5345     switch (host_blkpg->op) {
5346     case BLKPG_ADD_PARTITION:
5347     case BLKPG_DEL_PARTITION:
5348         /* payload is struct blkpg_partition */
5349         break;
5350     default:
5351         /* Unknown opcode */
5352         ret = -TARGET_EINVAL;
5353         goto out;
5354     }
5355 
5356     /* Read and convert blkpg->data */
5357     arg = (abi_long)(uintptr_t)host_blkpg->data;
5358     target_size = thunk_type_size(part_arg_type, 0);
5359     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5360     if (!argptr) {
5361         ret = -TARGET_EFAULT;
5362         goto out;
5363     }
5364     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5365     unlock_user(argptr, arg, 0);
5366 
5367     /* Swizzle the data pointer to our local copy and call! */
5368     host_blkpg->data = &host_part;
5369     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5370 
5371 out:
5372     return ret;
5373 }
5374 
5375 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5376                                 int fd, int cmd, abi_long arg)
5377 {
5378     const argtype *arg_type = ie->arg_type;
5379     const StructEntry *se;
5380     const argtype *field_types;
5381     const int *dst_offsets, *src_offsets;
5382     int target_size;
5383     void *argptr;
5384     abi_ulong *target_rt_dev_ptr = NULL;
5385     unsigned long *host_rt_dev_ptr = NULL;
5386     abi_long ret;
5387     int i;
5388 
5389     assert(ie->access == IOC_W);
5390     assert(*arg_type == TYPE_PTR);
5391     arg_type++;
5392     assert(*arg_type == TYPE_STRUCT);
5393     target_size = thunk_type_size(arg_type, 0);
5394     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5395     if (!argptr) {
5396         return -TARGET_EFAULT;
5397     }
5398     arg_type++;
5399     assert(*arg_type == (int)STRUCT_rtentry);
5400     se = struct_entries + *arg_type++;
5401     assert(se->convert[0] == NULL);
5402     /* convert struct here to be able to catch rt_dev string */
5403     field_types = se->field_types;
5404     dst_offsets = se->field_offsets[THUNK_HOST];
5405     src_offsets = se->field_offsets[THUNK_TARGET];
5406     for (i = 0; i < se->nb_fields; i++) {
5407         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5408             assert(*field_types == TYPE_PTRVOID);
5409             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5410             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5411             if (*target_rt_dev_ptr != 0) {
5412                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5413                                                   tswapal(*target_rt_dev_ptr));
5414                 if (!*host_rt_dev_ptr) {
5415                     unlock_user(argptr, arg, 0);
5416                     return -TARGET_EFAULT;
5417                 }
5418             } else {
5419                 *host_rt_dev_ptr = 0;
5420             }
5421             field_types++;
5422             continue;
5423         }
5424         field_types = thunk_convert(buf_temp + dst_offsets[i],
5425                                     argptr + src_offsets[i],
5426                                     field_types, THUNK_HOST);
5427     }
5428     unlock_user(argptr, arg, 0);
5429 
5430     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5431 
5432     assert(host_rt_dev_ptr != NULL);
5433     assert(target_rt_dev_ptr != NULL);
5434     if (*host_rt_dev_ptr != 0) {
5435         unlock_user((void *)*host_rt_dev_ptr,
5436                     *target_rt_dev_ptr, 0);
5437     }
5438     return ret;
5439 }
5440 
5441 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5442                                      int fd, int cmd, abi_long arg)
5443 {
5444     int sig = target_to_host_signal(arg);
5445     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5446 }
5447 
5448 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5449                                     int fd, int cmd, abi_long arg)
5450 {
5451     struct timeval tv;
5452     abi_long ret;
5453 
5454     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5455     if (is_error(ret)) {
5456         return ret;
5457     }
5458 
5459     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5460         if (copy_to_user_timeval(arg, &tv)) {
5461             return -TARGET_EFAULT;
5462         }
5463     } else {
5464         if (copy_to_user_timeval64(arg, &tv)) {
5465             return -TARGET_EFAULT;
5466         }
5467     }
5468 
5469     return ret;
5470 }
5471 
5472 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5473                                       int fd, int cmd, abi_long arg)
5474 {
5475     struct timespec ts;
5476     abi_long ret;
5477 
5478     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5479     if (is_error(ret)) {
5480         return ret;
5481     }
5482 
5483     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5484         if (host_to_target_timespec(arg, &ts)) {
5485             return -TARGET_EFAULT;
5486         }
5487     } else {
5488         if (host_to_target_timespec64(arg, &ts)) {
5489             return -TARGET_EFAULT;
5490         }
5491     }
5492 
5493     return ret;
5494 }
5495 
5496 #ifdef TIOCGPTPEER
5497 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5498                                      int fd, int cmd, abi_long arg)
5499 {
5500     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5501     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5502 }
5503 #endif
5504 
5505 #ifdef HAVE_DRM_H
5506 
5507 static void unlock_drm_version(struct drm_version *host_ver,
5508                                struct target_drm_version *target_ver,
5509                                bool copy)
5510 {
5511     unlock_user(host_ver->name, target_ver->name,
5512                                 copy ? host_ver->name_len : 0);
5513     unlock_user(host_ver->date, target_ver->date,
5514                                 copy ? host_ver->date_len : 0);
5515     unlock_user(host_ver->desc, target_ver->desc,
5516                                 copy ? host_ver->desc_len : 0);
5517 }
5518 
5519 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5520                                           struct target_drm_version *target_ver)
5521 {
5522     memset(host_ver, 0, sizeof(*host_ver));
5523 
5524     __get_user(host_ver->name_len, &target_ver->name_len);
5525     if (host_ver->name_len) {
5526         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5527                                    target_ver->name_len, 0);
5528         if (!host_ver->name) {
5529             return -EFAULT;
5530         }
5531     }
5532 
5533     __get_user(host_ver->date_len, &target_ver->date_len);
5534     if (host_ver->date_len) {
5535         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5536                                    target_ver->date_len, 0);
5537         if (!host_ver->date) {
5538             goto err;
5539         }
5540     }
5541 
5542     __get_user(host_ver->desc_len, &target_ver->desc_len);
5543     if (host_ver->desc_len) {
5544         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5545                                    target_ver->desc_len, 0);
5546         if (!host_ver->desc) {
5547             goto err;
5548         }
5549     }
5550 
5551     return 0;
5552 err:
5553     unlock_drm_version(host_ver, target_ver, false);
5554     return -EFAULT;
5555 }
5556 
5557 static inline void host_to_target_drmversion(
5558                                           struct target_drm_version *target_ver,
5559                                           struct drm_version *host_ver)
5560 {
5561     __put_user(host_ver->version_major, &target_ver->version_major);
5562     __put_user(host_ver->version_minor, &target_ver->version_minor);
5563     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5564     __put_user(host_ver->name_len, &target_ver->name_len);
5565     __put_user(host_ver->date_len, &target_ver->date_len);
5566     __put_user(host_ver->desc_len, &target_ver->desc_len);
5567     unlock_drm_version(host_ver, target_ver, true);
5568 }
5569 
5570 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5571                              int fd, int cmd, abi_long arg)
5572 {
5573     struct drm_version *ver;
5574     struct target_drm_version *target_ver;
5575     abi_long ret;
5576 
5577     switch (ie->host_cmd) {
5578     case DRM_IOCTL_VERSION:
5579         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5580             return -TARGET_EFAULT;
5581         }
5582         ver = (struct drm_version *)buf_temp;
5583         ret = target_to_host_drmversion(ver, target_ver);
5584         if (!is_error(ret)) {
5585             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5586             if (is_error(ret)) {
5587                 unlock_drm_version(ver, target_ver, false);
5588             } else {
5589                 host_to_target_drmversion(target_ver, ver);
5590             }
5591         }
5592         unlock_user_struct(target_ver, arg, 0);
5593         return ret;
5594     }
5595     return -TARGET_ENOSYS;
5596 }
5597 
5598 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5599                                            struct drm_i915_getparam *gparam,
5600                                            int fd, abi_long arg)
5601 {
5602     abi_long ret;
5603     int value;
5604     struct target_drm_i915_getparam *target_gparam;
5605 
5606     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5607         return -TARGET_EFAULT;
5608     }
5609 
5610     __get_user(gparam->param, &target_gparam->param);
5611     gparam->value = &value;
5612     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5613     put_user_s32(value, target_gparam->value);
5614 
5615     unlock_user_struct(target_gparam, arg, 0);
5616     return ret;
5617 }
5618 
5619 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5620                                   int fd, int cmd, abi_long arg)
5621 {
5622     switch (ie->host_cmd) {
5623     case DRM_IOCTL_I915_GETPARAM:
5624         return do_ioctl_drm_i915_getparam(ie,
5625                                           (struct drm_i915_getparam *)buf_temp,
5626                                           fd, arg);
5627     default:
5628         return -TARGET_ENOSYS;
5629     }
5630 }
5631 
5632 #endif
5633 
5634 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5635                                         int fd, int cmd, abi_long arg)
5636 {
5637     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5638     struct tun_filter *target_filter;
5639     char *target_addr;
5640 
5641     assert(ie->access == IOC_W);
5642 
5643     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5644     if (!target_filter) {
5645         return -TARGET_EFAULT;
5646     }
5647     filter->flags = tswap16(target_filter->flags);
5648     filter->count = tswap16(target_filter->count);
5649     unlock_user(target_filter, arg, 0);
5650 
5651     if (filter->count) {
5652         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5653             MAX_STRUCT_SIZE) {
5654             return -TARGET_EFAULT;
5655         }
5656 
5657         target_addr = lock_user(VERIFY_READ,
5658                                 arg + offsetof(struct tun_filter, addr),
5659                                 filter->count * ETH_ALEN, 1);
5660         if (!target_addr) {
5661             return -TARGET_EFAULT;
5662         }
5663         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5664         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5665     }
5666 
5667     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5668 }
5669 
5670 IOCTLEntry ioctl_entries[] = {
5671 #define IOCTL(cmd, access, ...) \
5672     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5673 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5674     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5675 #define IOCTL_IGNORE(cmd) \
5676     { TARGET_ ## cmd, 0, #cmd },
5677 #include "ioctls.h"
5678     { 0, 0, },
5679 };
5680 
5681 /* ??? Implement proper locking for ioctls.  */
5682 /* do_ioctl() must return target values and target errnos. */
5683 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5684 {
5685     const IOCTLEntry *ie;
5686     const argtype *arg_type;
5687     abi_long ret;
5688     uint8_t buf_temp[MAX_STRUCT_SIZE];
5689     int target_size;
5690     void *argptr;
5691 
5692     ie = ioctl_entries;
5693     for(;;) {
5694         if (ie->target_cmd == 0) {
5695             qemu_log_mask(
5696                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5697             return -TARGET_ENOSYS;
5698         }
5699         if (ie->target_cmd == cmd)
5700             break;
5701         ie++;
5702     }
5703     arg_type = ie->arg_type;
5704     if (ie->do_ioctl) {
5705         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5706     } else if (!ie->host_cmd) {
5707         /* Some architectures define BSD ioctls in their headers
5708            that are not implemented in Linux.  */
5709         return -TARGET_ENOSYS;
5710     }
5711 
5712     switch(arg_type[0]) {
5713     case TYPE_NULL:
5714         /* no argument */
5715         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5716         break;
5717     case TYPE_PTRVOID:
5718     case TYPE_INT:
5719     case TYPE_LONG:
5720     case TYPE_ULONG:
5721         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5722         break;
5723     case TYPE_PTR:
5724         arg_type++;
5725         target_size = thunk_type_size(arg_type, 0);
5726         switch(ie->access) {
5727         case IOC_R:
5728             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5729             if (!is_error(ret)) {
5730                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5731                 if (!argptr)
5732                     return -TARGET_EFAULT;
5733                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5734                 unlock_user(argptr, arg, target_size);
5735             }
5736             break;
5737         case IOC_W:
5738             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5739             if (!argptr)
5740                 return -TARGET_EFAULT;
5741             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5742             unlock_user(argptr, arg, 0);
5743             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5744             break;
5745         default:
5746         case IOC_RW:
5747             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5748             if (!argptr)
5749                 return -TARGET_EFAULT;
5750             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5751             unlock_user(argptr, arg, 0);
5752             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5753             if (!is_error(ret)) {
5754                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5755                 if (!argptr)
5756                     return -TARGET_EFAULT;
5757                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5758                 unlock_user(argptr, arg, target_size);
5759             }
5760             break;
5761         }
5762         break;
5763     default:
5764         qemu_log_mask(LOG_UNIMP,
5765                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5766                       (long)cmd, arg_type[0]);
5767         ret = -TARGET_ENOSYS;
5768         break;
5769     }
5770     return ret;
5771 }
5772 
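/* Each bitmask_transtbl entry is { target mask, target bits, host mask, host bits }. */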
5773 static const bitmask_transtbl iflag_tbl[] = {
5774         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5775         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5776         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5777         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5778         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5779         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5780         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5781         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5782         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5783         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5784         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5785         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5786         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5787         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5788         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5789         { 0, 0, 0, 0 }
5790 };
5791 
5792 static const bitmask_transtbl oflag_tbl[] = {
5793 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5794 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5795 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5796 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5797 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5798 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5799 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5800 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5801 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5802 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5803 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5804 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5805 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5806 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5807 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5808 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5809 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5810 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5811 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5812 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5813 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5814 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5815 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5816 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5817 	{ 0, 0, 0, 0 }
5818 };
5819 
5820 static const bitmask_transtbl cflag_tbl[] = {
5821 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5822 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5823 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5824 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5825 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5826 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5827 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5828 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5829 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5830 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5831 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5832 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5833 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5834 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5835 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5836 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5837 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5838 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5839 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5840 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5841 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5842 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5843 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5844 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5845 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5846 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5847 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5848 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5849 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5850 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5851 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5852 	{ 0, 0, 0, 0 }
5853 };
5854 
5855 static const bitmask_transtbl lflag_tbl[] = {
5856   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5857   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5858   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5859   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5860   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5861   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5862   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5863   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5864   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5865   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5866   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5867   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5868   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5869   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5870   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5871   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5872   { 0, 0, 0, 0 }
5873 };
5874 
5875 static void target_to_host_termios (void *dst, const void *src)
5876 {
5877     struct host_termios *host = dst;
5878     const struct target_termios *target = src;
5879 
5880     host->c_iflag =
5881         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5882     host->c_oflag =
5883         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5884     host->c_cflag =
5885         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5886     host->c_lflag =
5887         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5888     host->c_line = target->c_line;
5889 
5890     memset(host->c_cc, 0, sizeof(host->c_cc));
5891     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5892     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5893     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5894     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5895     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5896     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5897     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5898     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5899     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5900     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5901     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5902     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5903     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5904     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5905     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5906     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5907     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5908 }
5909 
5910 static void host_to_target_termios (void *dst, const void *src)
5911 {
5912     struct target_termios *target = dst;
5913     const struct host_termios *host = src;
5914 
5915     target->c_iflag =
5916         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5917     target->c_oflag =
5918         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5919     target->c_cflag =
5920         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5921     target->c_lflag =
5922         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5923     target->c_line = host->c_line;
5924 
5925     memset(target->c_cc, 0, sizeof(target->c_cc));
5926     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5927     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5928     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5929     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5930     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5931     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5932     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5933     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5934     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5935     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5936     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5937     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5938     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5939     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5940     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5941     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5942     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5943 }
5944 
5945 static const StructEntry struct_termios_def = {
5946     .convert = { host_to_target_termios, target_to_host_termios },
5947     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5948     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5949     .print = print_termios,
5950 };
5951 
5952 static const bitmask_transtbl mmap_flags_tbl[] = {
5953     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5954     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5955     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5956     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5957       MAP_ANONYMOUS, MAP_ANONYMOUS },
5958     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5959       MAP_GROWSDOWN, MAP_GROWSDOWN },
5960     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5961       MAP_DENYWRITE, MAP_DENYWRITE },
5962     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5963       MAP_EXECUTABLE, MAP_EXECUTABLE },
5964     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5965     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5966       MAP_NORESERVE, MAP_NORESERVE },
5967     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5968     /* MAP_STACK had been ignored by the kernel for quite some time.
5969        Recognize it for the target insofar as we do not want to pass
5970        it through to the host.  */
5971     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5972     { 0, 0, 0, 0 }
5973 };
5974 
5975 /*
5976  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5977  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5978  */
5979 #if defined(TARGET_I386)
5980 
5981 /* NOTE: there is really one LDT for all the threads */
5982 static uint8_t *ldt_table;
5983 
5984 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5985 {
5986     int size;
5987     void *p;
5988 
5989     if (!ldt_table)
5990         return 0;
5991     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5992     if (size > bytecount)
5993         size = bytecount;
5994     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5995     if (!p)
5996         return -TARGET_EFAULT;
5997     /* ??? Should this be byteswapped?  */
5998     memcpy(p, ldt_table, size);
5999     unlock_user(p, ptr, size);
6000     return size;
6001 }
6002 
6003 /* XXX: add locking support */
6004 static abi_long write_ldt(CPUX86State *env,
6005                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6006 {
6007     struct target_modify_ldt_ldt_s ldt_info;
6008     struct target_modify_ldt_ldt_s *target_ldt_info;
6009     int seg_32bit, contents, read_exec_only, limit_in_pages;
6010     int seg_not_present, useable, lm;
6011     uint32_t *lp, entry_1, entry_2;
6012 
6013     if (bytecount != sizeof(ldt_info))
6014         return -TARGET_EINVAL;
6015     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6016         return -TARGET_EFAULT;
6017     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6018     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6019     ldt_info.limit = tswap32(target_ldt_info->limit);
6020     ldt_info.flags = tswap32(target_ldt_info->flags);
6021     unlock_user_struct(target_ldt_info, ptr, 0);
6022 
6023     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6024         return -TARGET_EINVAL;
6025     seg_32bit = ldt_info.flags & 1;
6026     contents = (ldt_info.flags >> 1) & 3;
6027     read_exec_only = (ldt_info.flags >> 3) & 1;
6028     limit_in_pages = (ldt_info.flags >> 4) & 1;
6029     seg_not_present = (ldt_info.flags >> 5) & 1;
6030     useable = (ldt_info.flags >> 6) & 1;
6031 #ifdef TARGET_ABI32
6032     lm = 0;
6033 #else
6034     lm = (ldt_info.flags >> 7) & 1;
6035 #endif
6036     if (contents == 3) {
6037         if (oldmode)
6038             return -TARGET_EINVAL;
6039         if (seg_not_present == 0)
6040             return -TARGET_EINVAL;
6041     }
6042     /* allocate the LDT */
6043     if (!ldt_table) {
6044         env->ldt.base = target_mmap(0,
6045                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6046                                     PROT_READ|PROT_WRITE,
6047                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6048         if (env->ldt.base == -1)
6049             return -TARGET_ENOMEM;
6050         memset(g2h_untagged(env->ldt.base), 0,
6051                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6052         env->ldt.limit = 0xffff;
6053         ldt_table = g2h_untagged(env->ldt.base);
6054     }
6055 
6056     /* NOTE: same code as Linux kernel */
6057     /* Allow LDTs to be cleared by the user. */
6058     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6059         if (oldmode ||
6060             (contents == 0             &&
6061              read_exec_only == 1       &&
6062              seg_32bit == 0            &&
6063              limit_in_pages == 0       &&
6064              seg_not_present == 1      &&
6065              useable == 0 )) {
6066             entry_1 = 0;
6067             entry_2 = 0;
6068             goto install;
6069         }
6070     }
6071 
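    /* Pack base, limit and flag bits into the two 32-bit descriptor words. */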
6072     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6073         (ldt_info.limit & 0x0ffff);
6074     entry_2 = (ldt_info.base_addr & 0xff000000) |
6075         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6076         (ldt_info.limit & 0xf0000) |
6077         ((read_exec_only ^ 1) << 9) |
6078         (contents << 10) |
6079         ((seg_not_present ^ 1) << 15) |
6080         (seg_32bit << 22) |
6081         (limit_in_pages << 23) |
6082         (lm << 21) |
6083         0x7000;
6084     if (!oldmode)
6085         entry_2 |= (useable << 20);
6086 
6087     /* Install the new entry ...  */
6088 install:
6089     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6090     lp[0] = tswap32(entry_1);
6091     lp[1] = tswap32(entry_2);
6092     return 0;
6093 }
6094 
6095 /* specific and weird i386 syscalls */
6096 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6097                               unsigned long bytecount)
6098 {
6099     abi_long ret;
6100 
6101     switch (func) {
6102     case 0:
6103         ret = read_ldt(ptr, bytecount);
6104         break;
6105     case 1:
6106         ret = write_ldt(env, ptr, bytecount, 1);
6107         break;
6108     case 0x11:
6109         ret = write_ldt(env, ptr, bytecount, 0);
6110         break;
6111     default:
6112         ret = -TARGET_ENOSYS;
6113         break;
6114     }
6115     return ret;
6116 }
6117 
6118 #if defined(TARGET_ABI32)
6119 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6120 {
6121     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6122     struct target_modify_ldt_ldt_s ldt_info;
6123     struct target_modify_ldt_ldt_s *target_ldt_info;
6124     int seg_32bit, contents, read_exec_only, limit_in_pages;
6125     int seg_not_present, useable, lm;
6126     uint32_t *lp, entry_1, entry_2;
6127     int i;
6128 
6129     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6130     if (!target_ldt_info)
6131         return -TARGET_EFAULT;
6132     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6133     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6134     ldt_info.limit = tswap32(target_ldt_info->limit);
6135     ldt_info.flags = tswap32(target_ldt_info->flags);
6136     if (ldt_info.entry_number == -1) {
6137         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6138             if (gdt_table[i] == 0) {
6139                 ldt_info.entry_number = i;
6140                 target_ldt_info->entry_number = tswap32(i);
6141                 break;
6142             }
6143         }
6144     }
6145     unlock_user_struct(target_ldt_info, ptr, 1);
6146 
6147     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6148         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6149            return -TARGET_EINVAL;
6150     seg_32bit = ldt_info.flags & 1;
6151     contents = (ldt_info.flags >> 1) & 3;
6152     read_exec_only = (ldt_info.flags >> 3) & 1;
6153     limit_in_pages = (ldt_info.flags >> 4) & 1;
6154     seg_not_present = (ldt_info.flags >> 5) & 1;
6155     useable = (ldt_info.flags >> 6) & 1;
6156 #ifdef TARGET_ABI32
6157     lm = 0;
6158 #else
6159     lm = (ldt_info.flags >> 7) & 1;
6160 #endif
6161 
6162     if (contents == 3) {
6163         if (seg_not_present == 0)
6164             return -TARGET_EINVAL;
6165     }
6166 
6167     /* NOTE: same code as Linux kernel */
6168     /* Allow LDTs to be cleared by the user. */
6169     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6170         if ((contents == 0             &&
6171              read_exec_only == 1       &&
6172              seg_32bit == 0            &&
6173              limit_in_pages == 0       &&
6174              seg_not_present == 1      &&
6175              useable == 0 )) {
6176             entry_1 = 0;
6177             entry_2 = 0;
6178             goto install;
6179         }
6180     }
6181 
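    /* Build the GDT descriptor words, following the same layout as
     * write_ldt() above.
     */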
6182     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6183         (ldt_info.limit & 0x0ffff);
6184     entry_2 = (ldt_info.base_addr & 0xff000000) |
6185         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6186         (ldt_info.limit & 0xf0000) |
6187         ((read_exec_only ^ 1) << 9) |
6188         (contents << 10) |
6189         ((seg_not_present ^ 1) << 15) |
6190         (seg_32bit << 22) |
6191         (limit_in_pages << 23) |
6192         (useable << 20) |
6193         (lm << 21) |
6194         0x7000;
6195 
6196     /* Install the new entry ...  */
6197 install:
6198     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6199     lp[0] = tswap32(entry_1);
6200     lp[1] = tswap32(entry_2);
6201     return 0;
6202 }
6203 
6204 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6205 {
6206     struct target_modify_ldt_ldt_s *target_ldt_info;
6207     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6208     uint32_t base_addr, limit, flags;
6209     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6210     int seg_not_present, useable, lm;
6211     uint32_t *lp, entry_1, entry_2;
6212 
6213     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6214     if (!target_ldt_info)
6215         return -TARGET_EFAULT;
6216     idx = tswap32(target_ldt_info->entry_number);
6217     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6218         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6219         unlock_user_struct(target_ldt_info, ptr, 1);
6220         return -TARGET_EINVAL;
6221     }
6222     lp = (uint32_t *)(gdt_table + idx);
6223     entry_1 = tswap32(lp[0]);
6224     entry_2 = tswap32(lp[1]);
6225 
6226     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6227     contents = (entry_2 >> 10) & 3;
6228     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6229     seg_32bit = (entry_2 >> 22) & 1;
6230     limit_in_pages = (entry_2 >> 23) & 1;
6231     useable = (entry_2 >> 20) & 1;
6232 #ifdef TARGET_ABI32
6233     lm = 0;
6234 #else
6235     lm = (entry_2 >> 21) & 1;
6236 #endif
6237     flags = (seg_32bit << 0) | (contents << 1) |
6238         (read_exec_only << 3) | (limit_in_pages << 4) |
6239         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6240     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6241     base_addr = (entry_1 >> 16) |
6242         (entry_2 & 0xff000000) |
6243         ((entry_2 & 0xff) << 16);
6244     target_ldt_info->base_addr = tswapal(base_addr);
6245     target_ldt_info->limit = tswap32(limit);
6246     target_ldt_info->flags = tswap32(flags);
6247     unlock_user_struct(target_ldt_info, ptr, 1);
6248     return 0;
6249 }
6250 
6251 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6252 {
6253     return -TARGET_ENOSYS;
6254 }
6255 #else
6256 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6257 {
6258     abi_long ret = 0;
6259     abi_ulong val;
6260     int idx;
6261 
6262     switch(code) {
6263     case TARGET_ARCH_SET_GS:
6264     case TARGET_ARCH_SET_FS:
6265         if (code == TARGET_ARCH_SET_GS)
6266             idx = R_GS;
6267         else
6268             idx = R_FS;
6269         cpu_x86_load_seg(env, idx, 0);
6270         env->segs[idx].base = addr;
6271         break;
6272     case TARGET_ARCH_GET_GS:
6273     case TARGET_ARCH_GET_FS:
6274         if (code == TARGET_ARCH_GET_GS)
6275             idx = R_GS;
6276         else
6277             idx = R_FS;
6278         val = env->segs[idx].base;
6279         if (put_user(val, addr, abi_ulong))
6280             ret = -TARGET_EFAULT;
6281         break;
6282     default:
6283         ret = -TARGET_EINVAL;
6284         break;
6285     }
6286     return ret;
6287 }
6288 #endif /* defined(TARGET_ABI32) */
6289 
6290 #endif /* defined(TARGET_I386) */
6291 
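/* Size of the host stack (256 KiB) given to each thread created by the
 * emulated clone(). */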
6292 #define NEW_STACK_SIZE 0x40000
6293 
6294 
6295 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
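/* Handshake data passed from do_fork() to clone_func(): the child thread
 * fills in its tid and signals 'cond' once its own setup is done. */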
6296 typedef struct {
6297     CPUArchState *env;
6298     pthread_mutex_t mutex;
6299     pthread_cond_t cond;
6300     pthread_t thread;
6301     uint32_t tid;
6302     abi_ulong child_tidptr;
6303     abi_ulong parent_tidptr;
6304     sigset_t sigmask;
6305 } new_thread_info;
6306 
6307 static void *clone_func(void *arg)
6308 {
6309     new_thread_info *info = arg;
6310     CPUArchState *env;
6311     CPUState *cpu;
6312     TaskState *ts;
6313 
6314     rcu_register_thread();
6315     tcg_register_thread();
6316     env = info->env;
6317     cpu = env_cpu(env);
6318     thread_cpu = cpu;
6319     ts = (TaskState *)cpu->opaque;
6320     info->tid = sys_gettid();
6321     task_settid(ts);
6322     if (info->child_tidptr)
6323         put_user_u32(info->tid, info->child_tidptr);
6324     if (info->parent_tidptr)
6325         put_user_u32(info->tid, info->parent_tidptr);
6326     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6327     /* Enable signals.  */
6328     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6329     /* Signal to the parent that we're ready.  */
6330     pthread_mutex_lock(&info->mutex);
6331     pthread_cond_broadcast(&info->cond);
6332     pthread_mutex_unlock(&info->mutex);
6333     /* Wait until the parent has finished initializing the tls state.  */
6334     pthread_mutex_lock(&clone_lock);
6335     pthread_mutex_unlock(&clone_lock);
6336     cpu_loop(env);
6337     /* never exits */
6338     return NULL;
6339 }
6340 
6341 /* do_fork() must return host values and target errnos (unlike most
6342    do_*() functions). */
6343 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6344                    abi_ulong parent_tidptr, target_ulong newtls,
6345                    abi_ulong child_tidptr)
6346 {
6347     CPUState *cpu = env_cpu(env);
6348     int ret;
6349     TaskState *ts;
6350     CPUState *new_cpu;
6351     CPUArchState *new_env;
6352     sigset_t sigmask;
6353 
6354     flags &= ~CLONE_IGNORED_FLAGS;
6355 
6356     /* Emulate vfork() with fork() */
6357     if (flags & CLONE_VFORK)
6358         flags &= ~(CLONE_VFORK | CLONE_VM);
6359 
6360     if (flags & CLONE_VM) {
6361         TaskState *parent_ts = (TaskState *)cpu->opaque;
6362         new_thread_info info;
6363         pthread_attr_t attr;
6364 
6365         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6366             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6367             return -TARGET_EINVAL;
6368         }
6369 
6370         ts = g_new0(TaskState, 1);
6371         init_task_state(ts);
6372 
6373         /* Grab a mutex so that thread setup appears atomic.  */
6374         pthread_mutex_lock(&clone_lock);
6375 
6376         /*
6377          * If this is our first additional thread, we need to ensure we
6378          * generate code for parallel execution and flush old translations.
6379          * Do this now so that the copy gets CF_PARALLEL too.
6380          */
6381         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6382             cpu->tcg_cflags |= CF_PARALLEL;
6383             tb_flush(cpu);
6384         }
6385 
6386         /* we create a new CPU instance. */
6387         new_env = cpu_copy(env);
6388         /* Init regs that differ from the parent.  */
6389         cpu_clone_regs_child(new_env, newsp, flags);
6390         cpu_clone_regs_parent(env, flags);
6391         new_cpu = env_cpu(new_env);
6392         new_cpu->opaque = ts;
6393         ts->bprm = parent_ts->bprm;
6394         ts->info = parent_ts->info;
6395         ts->signal_mask = parent_ts->signal_mask;
6396 
6397         if (flags & CLONE_CHILD_CLEARTID) {
6398             ts->child_tidptr = child_tidptr;
6399         }
6400 
6401         if (flags & CLONE_SETTLS) {
6402             cpu_set_tls (new_env, newtls);
6403         }
6404 
6405         memset(&info, 0, sizeof(info));
6406         pthread_mutex_init(&info.mutex, NULL);
6407         pthread_mutex_lock(&info.mutex);
6408         pthread_cond_init(&info.cond, NULL);
6409         info.env = new_env;
6410         if (flags & CLONE_CHILD_SETTID) {
6411             info.child_tidptr = child_tidptr;
6412         }
6413         if (flags & CLONE_PARENT_SETTID) {
6414             info.parent_tidptr = parent_tidptr;
6415         }
6416 
6417         ret = pthread_attr_init(&attr);
6418         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6419         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6420         /* It is not safe to deliver signals until the child has finished
6421            initializing, so temporarily block all signals.  */
6422         sigfillset(&sigmask);
6423         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6424         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6425 
6426         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6427         /* TODO: Free new CPU state if thread creation failed.  */
6428 
6429         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6430         pthread_attr_destroy(&attr);
6431         if (ret == 0) {
6432             /* Wait for the child to initialize.  */
6433             pthread_cond_wait(&info.cond, &info.mutex);
6434             ret = info.tid;
6435         } else {
6436             ret = -1;
6437         }
6438         pthread_mutex_unlock(&info.mutex);
6439         pthread_cond_destroy(&info.cond);
6440         pthread_mutex_destroy(&info.mutex);
6441         pthread_mutex_unlock(&clone_lock);
6442     } else {
6443         /* Without CLONE_VM we treat this as a fork. */
6444         if (flags & CLONE_INVALID_FORK_FLAGS) {
6445             return -TARGET_EINVAL;
6446         }
6447 
6448         /* We can't support custom termination signals */
6449         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6450             return -TARGET_EINVAL;
6451         }
6452 
6453         if (block_signals()) {
6454             return -TARGET_ERESTARTSYS;
6455         }
6456 
6457         fork_start();
6458         ret = fork();
6459         if (ret == 0) {
6460             /* Child Process.  */
6461             cpu_clone_regs_child(env, newsp, flags);
6462             fork_end(1);
6463             /* There is a race condition here.  The parent process could
6464                theoretically read the TID in the child process before the child
6465                tid is set.  This would require using either ptrace
6466                (not implemented) or having *_tidptr point at a shared memory
6467                mapping.  We can't repeat the spinlock hack used above because
6468                the child process gets its own copy of the lock.  */
6469             if (flags & CLONE_CHILD_SETTID)
6470                 put_user_u32(sys_gettid(), child_tidptr);
6471             if (flags & CLONE_PARENT_SETTID)
6472                 put_user_u32(sys_gettid(), parent_tidptr);
6473             ts = (TaskState *)cpu->opaque;
6474             if (flags & CLONE_SETTLS)
6475                 cpu_set_tls (env, newtls);
6476             if (flags & CLONE_CHILD_CLEARTID)
6477                 ts->child_tidptr = child_tidptr;
6478         } else {
6479             cpu_clone_regs_parent(env, flags);
6480             fork_end(0);
6481         }
6482     }
6483     return ret;
6484 }
6485 
6486 /* Warning: doesn't handle Linux-specific flags... */
6487 static int target_to_host_fcntl_cmd(int cmd)
6488 {
6489     int ret;
6490 
6491     switch(cmd) {
6492     case TARGET_F_DUPFD:
6493     case TARGET_F_GETFD:
6494     case TARGET_F_SETFD:
6495     case TARGET_F_GETFL:
6496     case TARGET_F_SETFL:
6497     case TARGET_F_OFD_GETLK:
6498     case TARGET_F_OFD_SETLK:
6499     case TARGET_F_OFD_SETLKW:
6500         ret = cmd;
6501         break;
6502     case TARGET_F_GETLK:
6503         ret = F_GETLK64;
6504         break;
6505     case TARGET_F_SETLK:
6506         ret = F_SETLK64;
6507         break;
6508     case TARGET_F_SETLKW:
6509         ret = F_SETLKW64;
6510         break;
6511     case TARGET_F_GETOWN:
6512         ret = F_GETOWN;
6513         break;
6514     case TARGET_F_SETOWN:
6515         ret = F_SETOWN;
6516         break;
6517     case TARGET_F_GETSIG:
6518         ret = F_GETSIG;
6519         break;
6520     case TARGET_F_SETSIG:
6521         ret = F_SETSIG;
6522         break;
6523 #if TARGET_ABI_BITS == 32
6524     case TARGET_F_GETLK64:
6525         ret = F_GETLK64;
6526         break;
6527     case TARGET_F_SETLK64:
6528         ret = F_SETLK64;
6529         break;
6530     case TARGET_F_SETLKW64:
6531         ret = F_SETLKW64;
6532         break;
6533 #endif
6534     case TARGET_F_SETLEASE:
6535         ret = F_SETLEASE;
6536         break;
6537     case TARGET_F_GETLEASE:
6538         ret = F_GETLEASE;
6539         break;
6540 #ifdef F_DUPFD_CLOEXEC
6541     case TARGET_F_DUPFD_CLOEXEC:
6542         ret = F_DUPFD_CLOEXEC;
6543         break;
6544 #endif
6545     case TARGET_F_NOTIFY:
6546         ret = F_NOTIFY;
6547         break;
6548 #ifdef F_GETOWN_EX
6549     case TARGET_F_GETOWN_EX:
6550         ret = F_GETOWN_EX;
6551         break;
6552 #endif
6553 #ifdef F_SETOWN_EX
6554     case TARGET_F_SETOWN_EX:
6555         ret = F_SETOWN_EX;
6556         break;
6557 #endif
6558 #ifdef F_SETPIPE_SZ
6559     case TARGET_F_SETPIPE_SZ:
6560         ret = F_SETPIPE_SZ;
6561         break;
6562     case TARGET_F_GETPIPE_SZ:
6563         ret = F_GETPIPE_SZ;
6564         break;
6565 #endif
6566 #ifdef F_ADD_SEALS
6567     case TARGET_F_ADD_SEALS:
6568         ret = F_ADD_SEALS;
6569         break;
6570     case TARGET_F_GET_SEALS:
6571         ret = F_GET_SEALS;
6572         break;
6573 #endif
6574     default:
6575         ret = -TARGET_EINVAL;
6576         break;
6577     }
6578 
6579 #if defined(__powerpc64__)
6580     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6581      * 14, which the kernel does not support. The glibc fcntl wrapper adjusts
6582      * them to 5, 6 and 7 before making the syscall(). Since we make the
6583      * syscall directly, adjust to what the kernel supports.
6584      */
6585     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6586         ret -= F_GETLK64 - 5;
6587     }
6588 #endif
6589 
6590     return ret;
6591 }
6592 
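/* X-macro: each user defines TRANSTBL_CONVERT to expand this switch into
 * either a target-to-host or a host-to-target l_type conversion. */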
6593 #define FLOCK_TRANSTBL \
6594     switch (type) { \
6595     TRANSTBL_CONVERT(F_RDLCK); \
6596     TRANSTBL_CONVERT(F_WRLCK); \
6597     TRANSTBL_CONVERT(F_UNLCK); \
6598     }
6599 
6600 static int target_to_host_flock(int type)
6601 {
6602 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6603     FLOCK_TRANSTBL
6604 #undef  TRANSTBL_CONVERT
6605     return -TARGET_EINVAL;
6606 }
6607 
6608 static int host_to_target_flock(int type)
6609 {
6610 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6611     FLOCK_TRANSTBL
6612 #undef  TRANSTBL_CONVERT
6613     /* if we don't know how to convert the value coming
6614      * from the host, we copy it to the target field as-is
6615      */
6616     return type;
6617 }
6618 
6619 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6620                                             abi_ulong target_flock_addr)
6621 {
6622     struct target_flock *target_fl;
6623     int l_type;
6624 
6625     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6626         return -TARGET_EFAULT;
6627     }
6628 
6629     __get_user(l_type, &target_fl->l_type);
6630     l_type = target_to_host_flock(l_type);
6631     if (l_type < 0) {
6632         return l_type;
6633     }
6634     fl->l_type = l_type;
6635     __get_user(fl->l_whence, &target_fl->l_whence);
6636     __get_user(fl->l_start, &target_fl->l_start);
6637     __get_user(fl->l_len, &target_fl->l_len);
6638     __get_user(fl->l_pid, &target_fl->l_pid);
6639     unlock_user_struct(target_fl, target_flock_addr, 0);
6640     return 0;
6641 }
6642 
6643 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6644                                           const struct flock64 *fl)
6645 {
6646     struct target_flock *target_fl;
6647     short l_type;
6648 
6649     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6650         return -TARGET_EFAULT;
6651     }
6652 
6653     l_type = host_to_target_flock(fl->l_type);
6654     __put_user(l_type, &target_fl->l_type);
6655     __put_user(fl->l_whence, &target_fl->l_whence);
6656     __put_user(fl->l_start, &target_fl->l_start);
6657     __put_user(fl->l_len, &target_fl->l_len);
6658     __put_user(fl->l_pid, &target_fl->l_pid);
6659     unlock_user_struct(target_fl, target_flock_addr, 1);
6660     return 0;
6661 }
6662 
6663 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
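/* Function-pointer types that let callers pick between the standard and the
 * ARM OABI flock64 copy helpers at run time. */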
6664 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6665 
6666 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6667 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6668                                                    abi_ulong target_flock_addr)
6669 {
6670     struct target_oabi_flock64 *target_fl;
6671     int l_type;
6672 
6673     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6674         return -TARGET_EFAULT;
6675     }
6676 
6677     __get_user(l_type, &target_fl->l_type);
6678     l_type = target_to_host_flock(l_type);
6679     if (l_type < 0) {
6680         return l_type;
6681     }
6682     fl->l_type = l_type;
6683     __get_user(fl->l_whence, &target_fl->l_whence);
6684     __get_user(fl->l_start, &target_fl->l_start);
6685     __get_user(fl->l_len, &target_fl->l_len);
6686     __get_user(fl->l_pid, &target_fl->l_pid);
6687     unlock_user_struct(target_fl, target_flock_addr, 0);
6688     return 0;
6689 }
6690 
6691 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6692                                                  const struct flock64 *fl)
6693 {
6694     struct target_oabi_flock64 *target_fl;
6695     short l_type;
6696 
6697     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6698         return -TARGET_EFAULT;
6699     }
6700 
6701     l_type = host_to_target_flock(fl->l_type);
6702     __put_user(l_type, &target_fl->l_type);
6703     __put_user(fl->l_whence, &target_fl->l_whence);
6704     __put_user(fl->l_start, &target_fl->l_start);
6705     __put_user(fl->l_len, &target_fl->l_len);
6706     __put_user(fl->l_pid, &target_fl->l_pid);
6707     unlock_user_struct(target_fl, target_flock_addr, 1);
6708     return 0;
6709 }
6710 #endif
6711 
6712 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6713                                               abi_ulong target_flock_addr)
6714 {
6715     struct target_flock64 *target_fl;
6716     int l_type;
6717 
6718     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6719         return -TARGET_EFAULT;
6720     }
6721 
6722     __get_user(l_type, &target_fl->l_type);
6723     l_type = target_to_host_flock(l_type);
6724     if (l_type < 0) {
6725         return l_type;
6726     }
6727     fl->l_type = l_type;
6728     __get_user(fl->l_whence, &target_fl->l_whence);
6729     __get_user(fl->l_start, &target_fl->l_start);
6730     __get_user(fl->l_len, &target_fl->l_len);
6731     __get_user(fl->l_pid, &target_fl->l_pid);
6732     unlock_user_struct(target_fl, target_flock_addr, 0);
6733     return 0;
6734 }
6735 
6736 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6737                                             const struct flock64 *fl)
6738 {
6739     struct target_flock64 *target_fl;
6740     short l_type;
6741 
6742     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6743         return -TARGET_EFAULT;
6744     }
6745 
6746     l_type = host_to_target_flock(fl->l_type);
6747     __put_user(l_type, &target_fl->l_type);
6748     __put_user(fl->l_whence, &target_fl->l_whence);
6749     __put_user(fl->l_start, &target_fl->l_start);
6750     __put_user(fl->l_len, &target_fl->l_len);
6751     __put_user(fl->l_pid, &target_fl->l_pid);
6752     unlock_user_struct(target_fl, target_flock_addr, 1);
6753     return 0;
6754 }
6755 
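/* Common fcntl() backend: translate the command and any flock/f_owner_ex
 * argument between target and host layouts around safe_fcntl(). */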
6756 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6757 {
6758     struct flock64 fl64;
6759 #ifdef F_GETOWN_EX
6760     struct f_owner_ex fox;
6761     struct target_f_owner_ex *target_fox;
6762 #endif
6763     abi_long ret;
6764     int host_cmd = target_to_host_fcntl_cmd(cmd);
6765 
6766     if (host_cmd == -TARGET_EINVAL)
6767         return host_cmd;
6768 
6769     switch(cmd) {
6770     case TARGET_F_GETLK:
6771         ret = copy_from_user_flock(&fl64, arg);
6772         if (ret) {
6773             return ret;
6774         }
6775         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6776         if (ret == 0) {
6777             ret = copy_to_user_flock(arg, &fl64);
6778         }
6779         break;
6780 
6781     case TARGET_F_SETLK:
6782     case TARGET_F_SETLKW:
6783         ret = copy_from_user_flock(&fl64, arg);
6784         if (ret) {
6785             return ret;
6786         }
6787         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6788         break;
6789 
6790     case TARGET_F_GETLK64:
6791     case TARGET_F_OFD_GETLK:
6792         ret = copy_from_user_flock64(&fl64, arg);
6793         if (ret) {
6794             return ret;
6795         }
6796         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6797         if (ret == 0) {
6798             ret = copy_to_user_flock64(arg, &fl64);
6799         }
6800         break;
6801     case TARGET_F_SETLK64:
6802     case TARGET_F_SETLKW64:
6803     case TARGET_F_OFD_SETLK:
6804     case TARGET_F_OFD_SETLKW:
6805         ret = copy_from_user_flock64(&fl64, arg);
6806         if (ret) {
6807             return ret;
6808         }
6809         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6810         break;
6811 
6812     case TARGET_F_GETFL:
6813         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6814         if (ret >= 0) {
6815             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6816         }
6817         break;
6818 
6819     case TARGET_F_SETFL:
6820         ret = get_errno(safe_fcntl(fd, host_cmd,
6821                                    target_to_host_bitmask(arg,
6822                                                           fcntl_flags_tbl)));
6823         break;
6824 
6825 #ifdef F_GETOWN_EX
6826     case TARGET_F_GETOWN_EX:
6827         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6828         if (ret >= 0) {
6829             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6830                 return -TARGET_EFAULT;
6831             target_fox->type = tswap32(fox.type);
6832             target_fox->pid = tswap32(fox.pid);
6833             unlock_user_struct(target_fox, arg, 1);
6834         }
6835         break;
6836 #endif
6837 
6838 #ifdef F_SETOWN_EX
6839     case TARGET_F_SETOWN_EX:
6840         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6841             return -TARGET_EFAULT;
6842         fox.type = tswap32(target_fox->type);
6843         fox.pid = tswap32(target_fox->pid);
6844         unlock_user_struct(target_fox, arg, 0);
6845         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6846         break;
6847 #endif
6848 
6849     case TARGET_F_SETSIG:
6850         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6851         break;
6852 
6853     case TARGET_F_GETSIG:
6854         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6855         break;
6856 
6857     case TARGET_F_SETOWN:
6858     case TARGET_F_GETOWN:
6859     case TARGET_F_SETLEASE:
6860     case TARGET_F_GETLEASE:
6861     case TARGET_F_SETPIPE_SZ:
6862     case TARGET_F_GETPIPE_SZ:
6863     case TARGET_F_ADD_SEALS:
6864     case TARGET_F_GET_SEALS:
6865         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6866         break;
6867 
6868     default:
6869         ret = get_errno(safe_fcntl(fd, cmd, arg));
6870         break;
6871     }
6872     return ret;
6873 }
6874 
6875 #ifdef USE_UID16
6876 
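/* Legacy 16-bit UID/GID ABI: IDs that do not fit in 16 bits are reported as
 * 65534 (the kernel's overflowuid/overflowgid default), and the -1
 * "unchanged" sentinel must survive the widening conversions below. */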
6877 static inline int high2lowuid(int uid)
6878 {
6879     if (uid > 65535)
6880         return 65534;
6881     else
6882         return uid;
6883 }
6884 
6885 static inline int high2lowgid(int gid)
6886 {
6887     if (gid > 65535)
6888         return 65534;
6889     else
6890         return gid;
6891 }
6892 
6893 static inline int low2highuid(int uid)
6894 {
6895     if ((int16_t)uid == -1)
6896         return -1;
6897     else
6898         return uid;
6899 }
6900 
6901 static inline int low2highgid(int gid)
6902 {
6903     if ((int16_t)gid == -1)
6904         return -1;
6905     else
6906         return gid;
6907 }
6908 static inline int tswapid(int id)
6909 {
6910     return tswap16(id);
6911 }
6912 
6913 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6914 
6915 #else /* !USE_UID16 */
6916 static inline int high2lowuid(int uid)
6917 {
6918     return uid;
6919 }
6920 static inline int high2lowgid(int gid)
6921 {
6922     return gid;
6923 }
6924 static inline int low2highuid(int uid)
6925 {
6926     return uid;
6927 }
6928 static inline int low2highgid(int gid)
6929 {
6930     return gid;
6931 }
6932 static inline int tswapid(int id)
6933 {
6934     return tswap32(id);
6935 }
6936 
6937 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6938 
6939 #endif /* USE_UID16 */
6940 
6941 /* We must do direct syscalls for setting UID/GID, because we want to
6942  * implement the Linux system call semantics of "change only for this thread",
6943  * not the libc/POSIX semantics of "change for all threads in process".
6944  * (See http://ewontfix.com/17/ for more details.)
6945  * We use the 32-bit version of the syscalls if present; if it is not
6946  * then either the host architecture supports 32-bit UIDs natively with
6947  * the standard syscall, or the 16-bit UID is the best we can do.
6948  */
6949 #ifdef __NR_setuid32
6950 #define __NR_sys_setuid __NR_setuid32
6951 #else
6952 #define __NR_sys_setuid __NR_setuid
6953 #endif
6954 #ifdef __NR_setgid32
6955 #define __NR_sys_setgid __NR_setgid32
6956 #else
6957 #define __NR_sys_setgid __NR_setgid
6958 #endif
6959 #ifdef __NR_setresuid32
6960 #define __NR_sys_setresuid __NR_setresuid32
6961 #else
6962 #define __NR_sys_setresuid __NR_setresuid
6963 #endif
6964 #ifdef __NR_setresgid32
6965 #define __NR_sys_setresgid __NR_setresgid32
6966 #else
6967 #define __NR_sys_setresgid __NR_setresgid
6968 #endif
6969 
6970 _syscall1(int, sys_setuid, uid_t, uid)
6971 _syscall1(int, sys_setgid, gid_t, gid)
6972 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6973 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6974 
6975 void syscall_init(void)
6976 {
6977     IOCTLEntry *ie;
6978     const argtype *arg_type;
6979     int size;
6980 
6981     thunk_init(STRUCT_MAX);
6982 
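    /* Register the struct layouts from syscall_types.h with the thunk code;
     * they are used to convert ioctl arguments between target and host. */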
6983 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6984 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6985 #include "syscall_types.h"
6986 #undef STRUCT
6987 #undef STRUCT_SPECIAL
6988 
6989     /* We patch the ioctl size if necessary. We rely on the fact that
6990        no ioctl has all bits set to '1' in the size field. */
6991     ie = ioctl_entries;
6992     while (ie->target_cmd != 0) {
6993         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6994             TARGET_IOC_SIZEMASK) {
6995             arg_type = ie->arg_type;
6996             if (arg_type[0] != TYPE_PTR) {
6997                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6998                         ie->target_cmd);
6999                 exit(1);
7000             }
7001             arg_type++;
7002             size = thunk_type_size(arg_type, 0);
7003             ie->target_cmd = (ie->target_cmd &
7004                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7005                 (size << TARGET_IOC_SIZESHIFT);
7006         }
7007 
7008         /* automatic consistency check if same arch */
7009 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7010     (defined(__x86_64__) && defined(TARGET_X86_64))
7011         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7012             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7013                     ie->name, ie->target_cmd, ie->host_cmd);
7014         }
7015 #endif
7016         ie++;
7017     }
7018 }
7019 
7020 #ifdef TARGET_NR_truncate64
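/* On ABIs that require 64-bit syscall arguments to start in an even register
 * pair, a padding argument is inserted, so the offset halves arrive one
 * argument slot later. */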
7021 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7022                                          abi_long arg2,
7023                                          abi_long arg3,
7024                                          abi_long arg4)
7025 {
7026     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7027         arg2 = arg3;
7028         arg3 = arg4;
7029     }
7030     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7031 }
7032 #endif
7033 
7034 #ifdef TARGET_NR_ftruncate64
7035 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7036                                           abi_long arg2,
7037                                           abi_long arg3,
7038                                           abi_long arg4)
7039 {
7040     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7041         arg2 = arg3;
7042         arg3 = arg4;
7043     }
7044     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7045 }
7046 #endif
7047 
7048 #if defined(TARGET_NR_timer_settime) || \
7049     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7050 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7051                                                  abi_ulong target_addr)
7052 {
7053     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7054                                 offsetof(struct target_itimerspec,
7055                                          it_interval)) ||
7056         target_to_host_timespec(&host_its->it_value, target_addr +
7057                                 offsetof(struct target_itimerspec,
7058                                          it_value))) {
7059         return -TARGET_EFAULT;
7060     }
7061 
7062     return 0;
7063 }
7064 #endif
7065 
7066 #if defined(TARGET_NR_timer_settime64) || \
7067     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7068 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7069                                                    abi_ulong target_addr)
7070 {
7071     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7072                                   offsetof(struct target__kernel_itimerspec,
7073                                            it_interval)) ||
7074         target_to_host_timespec64(&host_its->it_value, target_addr +
7075                                   offsetof(struct target__kernel_itimerspec,
7076                                            it_value))) {
7077         return -TARGET_EFAULT;
7078     }
7079 
7080     return 0;
7081 }
7082 #endif
7083 
7084 #if ((defined(TARGET_NR_timerfd_gettime) || \
7085       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7086       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7087 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7088                                                  struct itimerspec *host_its)
7089 {
7090     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7091                                                        it_interval),
7092                                 &host_its->it_interval) ||
7093         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7094                                                        it_value),
7095                                 &host_its->it_value)) {
7096         return -TARGET_EFAULT;
7097     }
7098     return 0;
7099 }
7100 #endif
7101 
7102 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7103       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7104       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7105 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7106                                                    struct itimerspec *host_its)
7107 {
7108     if (host_to_target_timespec64(target_addr +
7109                                   offsetof(struct target__kernel_itimerspec,
7110                                            it_interval),
7111                                   &host_its->it_interval) ||
7112         host_to_target_timespec64(target_addr +
7113                                   offsetof(struct target__kernel_itimerspec,
7114                                            it_value),
7115                                   &host_its->it_value)) {
7116         return -TARGET_EFAULT;
7117     }
7118     return 0;
7119 }
7120 #endif
7121 
7122 #if defined(TARGET_NR_adjtimex) || \
7123     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7124 static inline abi_long target_to_host_timex(struct timex *host_tx,
7125                                             abi_long target_addr)
7126 {
7127     struct target_timex *target_tx;
7128 
7129     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7130         return -TARGET_EFAULT;
7131     }
7132 
7133     __get_user(host_tx->modes, &target_tx->modes);
7134     __get_user(host_tx->offset, &target_tx->offset);
7135     __get_user(host_tx->freq, &target_tx->freq);
7136     __get_user(host_tx->maxerror, &target_tx->maxerror);
7137     __get_user(host_tx->esterror, &target_tx->esterror);
7138     __get_user(host_tx->status, &target_tx->status);
7139     __get_user(host_tx->constant, &target_tx->constant);
7140     __get_user(host_tx->precision, &target_tx->precision);
7141     __get_user(host_tx->tolerance, &target_tx->tolerance);
7142     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7143     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7144     __get_user(host_tx->tick, &target_tx->tick);
7145     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7146     __get_user(host_tx->jitter, &target_tx->jitter);
7147     __get_user(host_tx->shift, &target_tx->shift);
7148     __get_user(host_tx->stabil, &target_tx->stabil);
7149     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7150     __get_user(host_tx->calcnt, &target_tx->calcnt);
7151     __get_user(host_tx->errcnt, &target_tx->errcnt);
7152     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7153     __get_user(host_tx->tai, &target_tx->tai);
7154 
7155     unlock_user_struct(target_tx, target_addr, 0);
7156     return 0;
7157 }
7158 
7159 static inline abi_long host_to_target_timex(abi_long target_addr,
7160                                             struct timex *host_tx)
7161 {
7162     struct target_timex *target_tx;
7163 
7164     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7165         return -TARGET_EFAULT;
7166     }
7167 
7168     __put_user(host_tx->modes, &target_tx->modes);
7169     __put_user(host_tx->offset, &target_tx->offset);
7170     __put_user(host_tx->freq, &target_tx->freq);
7171     __put_user(host_tx->maxerror, &target_tx->maxerror);
7172     __put_user(host_tx->esterror, &target_tx->esterror);
7173     __put_user(host_tx->status, &target_tx->status);
7174     __put_user(host_tx->constant, &target_tx->constant);
7175     __put_user(host_tx->precision, &target_tx->precision);
7176     __put_user(host_tx->tolerance, &target_tx->tolerance);
7177     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7178     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7179     __put_user(host_tx->tick, &target_tx->tick);
7180     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7181     __put_user(host_tx->jitter, &target_tx->jitter);
7182     __put_user(host_tx->shift, &target_tx->shift);
7183     __put_user(host_tx->stabil, &target_tx->stabil);
7184     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7185     __put_user(host_tx->calcnt, &target_tx->calcnt);
7186     __put_user(host_tx->errcnt, &target_tx->errcnt);
7187     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7188     __put_user(host_tx->tai, &target_tx->tai);
7189 
7190     unlock_user_struct(target_tx, target_addr, 1);
7191     return 0;
7192 }
7193 #endif
7194 
7195 
7196 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7197 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7198                                               abi_long target_addr)
7199 {
7200     struct target__kernel_timex *target_tx;
7201 
7202     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7203                                  offsetof(struct target__kernel_timex,
7204                                           time))) {
7205         return -TARGET_EFAULT;
7206     }
7207 
7208     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7209         return -TARGET_EFAULT;
7210     }
7211 
7212     __get_user(host_tx->modes, &target_tx->modes);
7213     __get_user(host_tx->offset, &target_tx->offset);
7214     __get_user(host_tx->freq, &target_tx->freq);
7215     __get_user(host_tx->maxerror, &target_tx->maxerror);
7216     __get_user(host_tx->esterror, &target_tx->esterror);
7217     __get_user(host_tx->status, &target_tx->status);
7218     __get_user(host_tx->constant, &target_tx->constant);
7219     __get_user(host_tx->precision, &target_tx->precision);
7220     __get_user(host_tx->tolerance, &target_tx->tolerance);
7221     __get_user(host_tx->tick, &target_tx->tick);
7222     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7223     __get_user(host_tx->jitter, &target_tx->jitter);
7224     __get_user(host_tx->shift, &target_tx->shift);
7225     __get_user(host_tx->stabil, &target_tx->stabil);
7226     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7227     __get_user(host_tx->calcnt, &target_tx->calcnt);
7228     __get_user(host_tx->errcnt, &target_tx->errcnt);
7229     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7230     __get_user(host_tx->tai, &target_tx->tai);
7231 
7232     unlock_user_struct(target_tx, target_addr, 0);
7233     return 0;
7234 }
7235 
7236 static inline abi_long host_to_target_timex64(abi_long target_addr,
7237                                               struct timex *host_tx)
7238 {
7239     struct target__kernel_timex *target_tx;
7240 
7241     if (copy_to_user_timeval64(target_addr +
7242                               offsetof(struct target__kernel_timex, time),
7243                               &host_tx->time)) {
7244         return -TARGET_EFAULT;
7245     }
7246 
7247     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7248         return -TARGET_EFAULT;
7249     }
7250 
7251     __put_user(host_tx->modes, &target_tx->modes);
7252     __put_user(host_tx->offset, &target_tx->offset);
7253     __put_user(host_tx->freq, &target_tx->freq);
7254     __put_user(host_tx->maxerror, &target_tx->maxerror);
7255     __put_user(host_tx->esterror, &target_tx->esterror);
7256     __put_user(host_tx->status, &target_tx->status);
7257     __put_user(host_tx->constant, &target_tx->constant);
7258     __put_user(host_tx->precision, &target_tx->precision);
7259     __put_user(host_tx->tolerance, &target_tx->tolerance);
7260     __put_user(host_tx->tick, &target_tx->tick);
7261     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7262     __put_user(host_tx->jitter, &target_tx->jitter);
7263     __put_user(host_tx->shift, &target_tx->shift);
7264     __put_user(host_tx->stabil, &target_tx->stabil);
7265     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7266     __put_user(host_tx->calcnt, &target_tx->calcnt);
7267     __put_user(host_tx->errcnt, &target_tx->errcnt);
7268     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7269     __put_user(host_tx->tai, &target_tx->tai);
7270 
7271     unlock_user_struct(target_tx, target_addr, 1);
7272     return 0;
7273 }
7274 #endif
7275 
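/* Fall back to the glibc-internal union member when the libc headers do not
 * expose sigev_notify_thread_id. */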
7276 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7277 #define sigev_notify_thread_id _sigev_un._tid
7278 #endif
7279 
7280 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7281                                                abi_ulong target_addr)
7282 {
7283     struct target_sigevent *target_sevp;
7284 
7285     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7286         return -TARGET_EFAULT;
7287     }
7288 
7289     /* This union is awkward on 64 bit systems because it has a 32 bit
7290      * integer and a pointer in it; we follow the conversion approach
7291      * used for handling sigval types in signal.c so the guest should get
7292      * the correct value back even if we did a 64 bit byteswap and it's
7293      * using the 32 bit integer.
7294      */
7295     host_sevp->sigev_value.sival_ptr =
7296         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7297     host_sevp->sigev_signo =
7298         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7299     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7300     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7301 
7302     unlock_user_struct(target_sevp, target_addr, 1);
7303     return 0;
7304 }
7305 
7306 #if defined(TARGET_NR_mlockall)
7307 static inline int target_to_host_mlockall_arg(int arg)
7308 {
7309     int result = 0;
7310 
7311     if (arg & TARGET_MCL_CURRENT) {
7312         result |= MCL_CURRENT;
7313     }
7314     if (arg & TARGET_MCL_FUTURE) {
7315         result |= MCL_FUTURE;
7316     }
7317 #ifdef MCL_ONFAULT
7318     if (arg & TARGET_MCL_ONFAULT) {
7319         result |= MCL_ONFAULT;
7320     }
7321 #endif
7322 
7323     return result;
7324 }
7325 #endif
7326 
7327 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7328      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7329      defined(TARGET_NR_newfstatat))
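/* Copy a host struct stat into the guest's stat64 layout, using the ARM
 * EABI-specific structure when the guest is ARM EABI. */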
7330 static inline abi_long host_to_target_stat64(void *cpu_env,
7331                                              abi_ulong target_addr,
7332                                              struct stat *host_st)
7333 {
7334 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7335     if (((CPUARMState *)cpu_env)->eabi) {
7336         struct target_eabi_stat64 *target_st;
7337 
7338         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7339             return -TARGET_EFAULT;
7340         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7341         __put_user(host_st->st_dev, &target_st->st_dev);
7342         __put_user(host_st->st_ino, &target_st->st_ino);
7343 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7344         __put_user(host_st->st_ino, &target_st->__st_ino);
7345 #endif
7346         __put_user(host_st->st_mode, &target_st->st_mode);
7347         __put_user(host_st->st_nlink, &target_st->st_nlink);
7348         __put_user(host_st->st_uid, &target_st->st_uid);
7349         __put_user(host_st->st_gid, &target_st->st_gid);
7350         __put_user(host_st->st_rdev, &target_st->st_rdev);
7351         __put_user(host_st->st_size, &target_st->st_size);
7352         __put_user(host_st->st_blksize, &target_st->st_blksize);
7353         __put_user(host_st->st_blocks, &target_st->st_blocks);
7354         __put_user(host_st->st_atime, &target_st->target_st_atime);
7355         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7356         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7357 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7358         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7359         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7360         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7361 #endif
7362         unlock_user_struct(target_st, target_addr, 1);
7363     } else
7364 #endif
7365     {
7366 #if defined(TARGET_HAS_STRUCT_STAT64)
7367         struct target_stat64 *target_st;
7368 #else
7369         struct target_stat *target_st;
7370 #endif
7371 
7372         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7373             return -TARGET_EFAULT;
7374         memset(target_st, 0, sizeof(*target_st));
7375         __put_user(host_st->st_dev, &target_st->st_dev);
7376         __put_user(host_st->st_ino, &target_st->st_ino);
7377 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7378         __put_user(host_st->st_ino, &target_st->__st_ino);
7379 #endif
7380         __put_user(host_st->st_mode, &target_st->st_mode);
7381         __put_user(host_st->st_nlink, &target_st->st_nlink);
7382         __put_user(host_st->st_uid, &target_st->st_uid);
7383         __put_user(host_st->st_gid, &target_st->st_gid);
7384         __put_user(host_st->st_rdev, &target_st->st_rdev);
7385         /* XXX: better use of kernel struct */
7386         __put_user(host_st->st_size, &target_st->st_size);
7387         __put_user(host_st->st_blksize, &target_st->st_blksize);
7388         __put_user(host_st->st_blocks, &target_st->st_blocks);
7389         __put_user(host_st->st_atime, &target_st->target_st_atime);
7390         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7391         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7392 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7393         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7394         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7395         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7396 #endif
7397         unlock_user_struct(target_st, target_addr, 1);
7398     }
7399 
7400     return 0;
7401 }
7402 #endif
7403 
7404 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7405 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7406                                             abi_ulong target_addr)
7407 {
7408     struct target_statx *target_stx;
7409 
7410     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7411         return -TARGET_EFAULT;
7412     }
7413     memset(target_stx, 0, sizeof(*target_stx));
7414 
7415     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7416     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7417     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7418     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7419     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7420     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7421     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7422     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7423     __put_user(host_stx->stx_size, &target_stx->stx_size);
7424     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7425     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7426     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7427     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7428     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7429     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7430     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7431     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7432     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7433     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7434     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7435     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7436     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7437     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7438 
7439     unlock_user_struct(target_stx, target_addr, 1);
7440 
7441     return 0;
7442 }
7443 #endif
7444 
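/* Pick the host futex syscall variant matching the timespec we were given:
 * the plain syscall on 64-bit hosts, futex_time64 when a 32-bit host uses a
 * 64-bit time_t, and the old syscall otherwise. */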
7445 static int do_sys_futex(int *uaddr, int op, int val,
7446                          const struct timespec *timeout, int *uaddr2,
7447                          int val3)
7448 {
7449 #if HOST_LONG_BITS == 64
7450 #if defined(__NR_futex)
7451     /* The host always has a 64-bit time_t; no _time64 variant is defined. */
7452     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7453 
7454 #endif
7455 #else /* HOST_LONG_BITS == 64 */
7456 #if defined(__NR_futex_time64)
7457     if (sizeof(timeout->tv_sec) == 8) {
7458         /* _time64 function on 32bit arch */
7459         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7460     }
7461 #endif
7462 #if defined(__NR_futex)
7463     /* old function on 32bit arch */
7464     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7465 #endif
7466 #endif /* HOST_LONG_BITS == 64 */
7467     g_assert_not_reached();
7468 }
7469 
7470 static int do_safe_futex(int *uaddr, int op, int val,
7471                          const struct timespec *timeout, int *uaddr2,
7472                          int val3)
7473 {
7474 #if HOST_LONG_BITS == 64
7475 #if defined(__NR_futex)
7476     /* The host always has a 64-bit time_t; no _time64 variant is defined. */
7477     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7478 #endif
7479 #else /* HOST_LONG_BITS == 64 */
7480 #if defined(__NR_futex_time64)
7481     if (sizeof(timeout->tv_sec) == 8) {
7482         /* _time64 function on 32bit arch */
7483         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7484                                            val3));
7485     }
7486 #endif
7487 #if defined(__NR_futex)
7488     /* old function on 32bit arch */
7489     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7490 #endif
7491 #endif /* HOST_LONG_BITS == 64 */
7492     return -TARGET_ENOSYS;
7493 }
7494 
7495 /* ??? Using host futex calls even when target atomic operations
7496    are not really atomic probably breaks things.  However, implementing
7497    futexes locally would make futexes shared between multiple processes
7498    tricky.  Then again, they are probably useless because guest atomic
7499    operations won't work either.  */
7500 #if defined(TARGET_NR_futex)
7501 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7502                     target_ulong timeout, target_ulong uaddr2, int val3)
7503 {
7504     struct timespec ts, *pts;
7505     int base_op;
7506 
7507     /* ??? We assume FUTEX_* constants are the same on both host
7508        and target.  */
7509 #ifdef FUTEX_CMD_MASK
7510     base_op = op & FUTEX_CMD_MASK;
7511 #else
7512     base_op = op;
7513 #endif
7514     switch (base_op) {
7515     case FUTEX_WAIT:
7516     case FUTEX_WAIT_BITSET:
7517         if (timeout) {
7518             pts = &ts;
7519             if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
7520         } else {
7521             pts = NULL;
7522         }
7523         return do_safe_futex(g2h(cpu, uaddr),
7524                              op, tswap32(val), pts, NULL, val3);
7525     case FUTEX_WAKE:
7526         return do_safe_futex(g2h(cpu, uaddr),
7527                              op, val, NULL, NULL, 0);
7528     case FUTEX_FD:
7529         return do_safe_futex(g2h(cpu, uaddr),
7530                              op, val, NULL, NULL, 0);
7531     case FUTEX_REQUEUE:
7532     case FUTEX_CMP_REQUEUE:
7533     case FUTEX_WAKE_OP:
7534         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7535            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7536            But the prototype takes a `struct timespec *'; insert casts
7537            to satisfy the compiler.  We do not need to tswap TIMEOUT
7538            since it's not compared to guest memory.  */
7539         pts = (struct timespec *)(uintptr_t) timeout;
7540         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7541                              (base_op == FUTEX_CMP_REQUEUE
7542                               ? tswap32(val3) : val3));
7543     default:
7544         return -TARGET_ENOSYS;
7545     }
7546 }
7547 #endif
7548 
7549 #if defined(TARGET_NR_futex_time64)
7550 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7551                            int val, target_ulong timeout,
7552                            target_ulong uaddr2, int val3)
7553 {
7554     struct timespec ts, *pts;
7555     int base_op;
7556 
7557     /* ??? We assume FUTEX_* constants are the same on both host
7558        and target.  */
7559 #ifdef FUTEX_CMD_MASK
7560     base_op = op & FUTEX_CMD_MASK;
7561 #else
7562     base_op = op;
7563 #endif
7564     switch (base_op) {
7565     case FUTEX_WAIT:
7566     case FUTEX_WAIT_BITSET:
7567         if (timeout) {
7568             pts = &ts;
7569             if (target_to_host_timespec64(pts, timeout)) {
7570                 return -TARGET_EFAULT;
7571             }
7572         } else {
7573             pts = NULL;
7574         }
7575         return do_safe_futex(g2h(cpu, uaddr), op,
7576                              tswap32(val), pts, NULL, val3);
7577     case FUTEX_WAKE:
7578         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7579     case FUTEX_FD:
7580         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7581     case FUTEX_REQUEUE:
7582     case FUTEX_CMP_REQUEUE:
7583     case FUTEX_WAKE_OP:
7584         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7585            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7586            But the prototype takes a `struct timespec *'; insert casts
7587            to satisfy the compiler.  We do not need to tswap TIMEOUT
7588            since it's not compared to guest memory.  */
7589         pts = (struct timespec *)(uintptr_t) timeout;
7590         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7591                              (base_op == FUTEX_CMP_REQUEUE
7592                               ? tswap32(val3) : val3));
7593     default:
7594         return -TARGET_ENOSYS;
7595     }
7596 }
7597 #endif
7598 
7599 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7600 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7601                                      abi_long handle, abi_long mount_id,
7602                                      abi_long flags)
7603 {
7604     struct file_handle *target_fh;
7605     struct file_handle *fh;
7606     int mid = 0;
7607     abi_long ret;
7608     char *name;
7609     unsigned int size, total_size;
7610 
7611     if (get_user_s32(size, handle)) {
7612         return -TARGET_EFAULT;
7613     }
7614 
7615     name = lock_user_string(pathname);
7616     if (!name) {
7617         return -TARGET_EFAULT;
7618     }
7619 
7620     total_size = sizeof(struct file_handle) + size;
7621     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7622     if (!target_fh) {
7623         unlock_user(name, pathname, 0);
7624         return -TARGET_EFAULT;
7625     }
7626 
7627     fh = g_malloc0(total_size);
7628     fh->handle_bytes = size;
7629 
7630     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7631     unlock_user(name, pathname, 0);
7632 
7633     /* man name_to_handle_at(2):
7634      * Other than the use of the handle_bytes field, the caller should treat
7635      * the file_handle structure as an opaque data type
7636      */
7637 
7638     memcpy(target_fh, fh, total_size);
7639     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7640     target_fh->handle_type = tswap32(fh->handle_type);
7641     g_free(fh);
7642     unlock_user(target_fh, handle, total_size);
7643 
7644     if (put_user_s32(mid, mount_id)) {
7645         return -TARGET_EFAULT;
7646     }
7647 
7648     return ret;
7649 
7650 }
7651 #endif
7652 
7653 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
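/*
 * Emulate open_by_handle_at(): duplicate the guest file_handle into host
 * memory, restore host byte order for handle_bytes and handle_type, and
 * translate the open flags through fcntl_flags_tbl before calling the
 * host syscall.
 */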
7654 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7655                                      abi_long flags)
7656 {
7657     struct file_handle *target_fh;
7658     struct file_handle *fh;
7659     unsigned int size, total_size;
7660     abi_long ret;
7661 
7662     if (get_user_s32(size, handle)) {
7663         return -TARGET_EFAULT;
7664     }
7665 
7666     total_size = sizeof(struct file_handle) + size;
7667     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7668     if (!target_fh) {
7669         return -TARGET_EFAULT;
7670     }
7671 
7672     fh = g_memdup(target_fh, total_size);
7673     fh->handle_bytes = size;
7674     fh->handle_type = tswap32(target_fh->handle_type);
7675 
7676     ret = get_errno(open_by_handle_at(mount_fd, fh,
7677                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7678 
7679     g_free(fh);
7680 
7681     unlock_user(target_fh, handle, total_size);
7682 
7683     return ret;
7684 }
7685 #endif
7686 
7687 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7688 
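/*
 * Common helper for signalfd/signalfd4: validate the flags, convert the
 * guest signal mask to a host sigset_t, and register an fd translator
 * (target_signalfd_trans) so that data later read from the fd can be
 * converted to the target's signalfd_siginfo layout.
 */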
7689 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7690 {
7691     int host_flags;
7692     target_sigset_t *target_mask;
7693     sigset_t host_mask;
7694     abi_long ret;
7695 
7696     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7697         return -TARGET_EINVAL;
7698     }
7699     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7700         return -TARGET_EFAULT;
7701     }
7702 
7703     target_to_host_sigset(&host_mask, target_mask);
7704 
7705     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7706 
7707     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7708     if (ret >= 0) {
7709         fd_trans_register(ret, &target_signalfd_trans);
7710     }
7711 
7712     unlock_user_struct(target_mask, mask, 0);
7713 
7714     return ret;
7715 }
7716 #endif
7717 
7718 /* Map host to target signal numbers for the wait family of syscalls.
7719    Assume all other status bits are the same.  */
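/* For example, a child killed by the host's SIGUSR1 is reported with the
   target's SIGUSR1 number in the low 7 bits, while the core-dump flag and
   exit-status bits pass through unchanged.  */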
7720 int host_to_target_waitstatus(int status)
7721 {
7722     if (WIFSIGNALED(status)) {
7723         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7724     }
7725     if (WIFSTOPPED(status)) {
7726         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7727                | (status & 0xff);
7728     }
7729     return status;
7730 }
7731 
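/*
 * Synthesize /proc/self/cmdline from the argv strings saved in the
 * guest's linux_binprm; each argument is written with its trailing NUL,
 * matching the kernel's cmdline format.
 */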
7732 static int open_self_cmdline(void *cpu_env, int fd)
7733 {
7734     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7735     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7736     int i;
7737 
7738     for (i = 0; i < bprm->argc; i++) {
7739         size_t len = strlen(bprm->argv[i]) + 1;
7740 
7741         if (write(fd, bprm->argv[i], len) != len) {
7742             return -1;
7743         }
7744     }
7745 
7746     return 0;
7747 }
7748 
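/*
 * Synthesize /proc/self/maps: walk the host mappings returned by
 * read_self_maps(), keep only ranges that are valid guest addresses, and
 * print them using guest (h2g) addresses and QEMU's page flags.  The
 * guest stack is labelled "[stack]", and a fake "[vsyscall]" line is
 * appended for targets that define TARGET_VSYSCALL_PAGE.
 */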
7749 static int open_self_maps(void *cpu_env, int fd)
7750 {
7751     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7752     TaskState *ts = cpu->opaque;
7753     GSList *map_info = read_self_maps();
7754     GSList *s;
7755     int count;
7756 
7757     for (s = map_info; s; s = g_slist_next(s)) {
7758         MapInfo *e = (MapInfo *) s->data;
7759 
7760         if (h2g_valid(e->start)) {
7761             unsigned long min = e->start;
7762             unsigned long max = e->end;
7763             int flags = page_get_flags(h2g(min));
7764             const char *path;
7765 
7766             max = h2g_valid(max - 1) ?
7767                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7768 
7769             if (page_check_range(h2g(min), max - min, flags) == -1) {
7770                 continue;
7771             }
7772 
7773             if (h2g(min) == ts->info->stack_limit) {
7774                 path = "[stack]";
7775             } else {
7776                 path = e->path;
7777             }
7778 
7779             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7780                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7781                             h2g(min), h2g(max - 1) + 1,
7782                             (flags & PAGE_READ) ? 'r' : '-',
7783                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7784                             (flags & PAGE_EXEC) ? 'x' : '-',
7785                             e->is_priv ? 'p' : '-',
7786                             (uint64_t) e->offset, e->dev, e->inode);
7787             if (path) {
7788                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7789             } else {
7790                 dprintf(fd, "\n");
7791             }
7792         }
7793     }
7794 
7795     free_self_maps(map_info);
7796 
7797 #ifdef TARGET_VSYSCALL_PAGE
7798     /*
7799      * We only support execution from the vsyscall page.
7800      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7801      */
7802     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7803                     " --xp 00000000 00:00 0",
7804                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7805     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7806 #endif
7807 
7808     return 0;
7809 }
7810 
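/*
 * Synthesize /proc/self/stat: only the pid, comm (truncated to 15
 * characters), ppid and start-of-stack fields are filled in; all other
 * fields read as 0.
 */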
7811 static int open_self_stat(void *cpu_env, int fd)
7812 {
7813     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7814     TaskState *ts = cpu->opaque;
7815     g_autoptr(GString) buf = g_string_new(NULL);
7816     int i;
7817 
7818     for (i = 0; i < 44; i++) {
7819         if (i == 0) {
7820             /* pid */
7821             g_string_printf(buf, FMT_pid " ", getpid());
7822         } else if (i == 1) {
7823             /* app name */
7824             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7825             bin = bin ? bin + 1 : ts->bprm->argv[0];
7826             g_string_printf(buf, "(%.15s) ", bin);
7827         } else if (i == 3) {
7828             /* ppid */
7829             g_string_printf(buf, FMT_pid " ", getppid());
7830         } else if (i == 27) {
7831             /* stack bottom */
7832             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7833         } else {
7834             /* the remaining fields are not emulated; report them all as 0 */
7835             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7836         }
7837 
7838         if (write(fd, buf->str, buf->len) != buf->len) {
7839             return -1;
7840         }
7841     }
7842 
7843     return 0;
7844 }
7845 
7846 static int open_self_auxv(void *cpu_env, int fd)
7847 {
7848     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7849     TaskState *ts = cpu->opaque;
7850     abi_ulong auxv = ts->info->saved_auxv;
7851     abi_ulong len = ts->info->auxv_len;
7852     char *ptr;
7853 
7854     /*
7855      * The auxiliary vector is stored on the target process's stack.
7856      * Read the whole vector and copy it to the file.
7857      */
7858     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7859     if (ptr != NULL) {
7860         while (len > 0) {
7861             ssize_t r;
7862             r = write(fd, ptr, len);
7863             if (r <= 0) {
7864                 break;
7865             }
7866             len -= r;
7867             ptr += r;
7868         }
7869         lseek(fd, 0, SEEK_SET);
7870         unlock_user(ptr, auxv, len);
7871     }
7872 
7873     return 0;
7874 }
7875 
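/*
 * Return 1 if filename names the given entry in our own /proc directory,
 * i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" for our own pid;
 * return 0 otherwise.
 */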
7876 static int is_proc_myself(const char *filename, const char *entry)
7877 {
7878     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7879         filename += strlen("/proc/");
7880         if (!strncmp(filename, "self/", strlen("self/"))) {
7881             filename += strlen("self/");
7882         } else if (*filename >= '1' && *filename <= '9') {
7883             char myself[80];
7884             snprintf(myself, sizeof(myself), "%d/", getpid());
7885             if (!strncmp(filename, myself, strlen(myself))) {
7886                 filename += strlen(myself);
7887             } else {
7888                 return 0;
7889             }
7890         } else {
7891             return 0;
7892         }
7893         if (!strcmp(filename, entry)) {
7894             return 1;
7895         }
7896     }
7897     return 0;
7898 }
7899 
7900 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7901     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7902 static int is_proc(const char *filename, const char *entry)
7903 {
7904     return strcmp(filename, entry) == 0;
7905 }
7906 #endif
7907 
7908 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
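/*
 * Only needed when host and target endianness differ: copy
 * /proc/net/route through while byte-swapping the destination, gateway
 * and netmask columns so the guest sees them in its own byte order.
 */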
7909 static int open_net_route(void *cpu_env, int fd)
7910 {
7911     FILE *fp;
7912     char *line = NULL;
7913     size_t len = 0;
7914     ssize_t read;
7915 
7916     fp = fopen("/proc/net/route", "r");
7917     if (fp == NULL) {
7918         return -1;
7919     }
7920 
7921     /* read header */
7922 
7923     read = getline(&line, &len, fp);
7924     dprintf(fd, "%s", line);
7925 
7926     /* read routes */
7927 
7928     while ((read = getline(&line, &len, fp)) != -1) {
7929         char iface[16];
7930         uint32_t dest, gw, mask;
7931         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7932         int fields;
7933 
7934         fields = sscanf(line,
7935                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7936                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7937                         &mask, &mtu, &window, &irtt);
7938         if (fields != 11) {
7939             continue;
7940         }
7941         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7942                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7943                 metric, tswap32(mask), mtu, window, irtt);
7944     }
7945 
7946     free(line);
7947     fclose(fp);
7948 
7949     return 0;
7950 }
7951 #endif
7952 
7953 #if defined(TARGET_SPARC)
7954 static int open_cpuinfo(void *cpu_env, int fd)
7955 {
7956     dprintf(fd, "type\t\t: sun4u\n");
7957     return 0;
7958 }
7959 #endif
7960 
7961 #if defined(TARGET_HPPA)
7962 static int open_cpuinfo(void *cpu_env, int fd)
7963 {
7964     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7965     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7966     dprintf(fd, "capabilities\t: os32\n");
7967     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7968     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7969     return 0;
7970 }
7971 #endif
7972 
7973 #if defined(TARGET_M68K)
7974 static int open_hardware(void *cpu_env, int fd)
7975 {
7976     dprintf(fd, "Model:\t\tqemu-m68k\n");
7977     return 0;
7978 }
7979 #endif
7980 
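/*
 * openat() with emulation of selected /proc files.  Paths matching the
 * fake_open table are answered by writing generated contents into an
 * unlinked temporary file and returning that descriptor; "/proc/self/exe"
 * is redirected to the real executable; everything else falls through to
 * safe_openat().
 */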
7981 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7982 {
7983     struct fake_open {
7984         const char *filename;
7985         int (*fill)(void *cpu_env, int fd);
7986         int (*cmp)(const char *s1, const char *s2);
7987     };
7988     const struct fake_open *fake_open;
7989     static const struct fake_open fakes[] = {
7990         { "maps", open_self_maps, is_proc_myself },
7991         { "stat", open_self_stat, is_proc_myself },
7992         { "auxv", open_self_auxv, is_proc_myself },
7993         { "cmdline", open_self_cmdline, is_proc_myself },
7994 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7995         { "/proc/net/route", open_net_route, is_proc },
7996 #endif
7997 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7998         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7999 #endif
8000 #if defined(TARGET_M68K)
8001         { "/proc/hardware", open_hardware, is_proc },
8002 #endif
8003         { NULL, NULL, NULL }
8004     };
8005 
8006     if (is_proc_myself(pathname, "exe")) {
8007         int execfd = qemu_getauxval(AT_EXECFD);
8008         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8009     }
8010 
8011     for (fake_open = fakes; fake_open->filename; fake_open++) {
8012         if (fake_open->cmp(pathname, fake_open->filename)) {
8013             break;
8014         }
8015     }
8016 
8017     if (fake_open->filename) {
8018         const char *tmpdir;
8019         char filename[PATH_MAX];
8020         int fd, r;
8021 
8022         /* create a temporary file to hold the emulated /proc contents */
8023         tmpdir = getenv("TMPDIR");
8024         if (!tmpdir)
8025             tmpdir = "/tmp";
8026         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8027         fd = mkstemp(filename);
8028         if (fd < 0) {
8029             return fd;
8030         }
8031         unlink(filename);
8032 
8033         if ((r = fake_open->fill(cpu_env, fd))) {
8034             int e = errno;
8035             close(fd);
8036             errno = e;
8037             return r;
8038         }
8039         lseek(fd, 0, SEEK_SET);
8040 
8041         return fd;
8042     }
8043 
8044     return safe_openat(dirfd, path(pathname), flags, mode);
8045 }
8046 
8047 #define TIMER_MAGIC 0x0caf0000
8048 #define TIMER_MAGIC_MASK 0xffff0000
8049 
8050 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
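/*
 * For example, a guest-visible ID of 0x0caf0002 decodes to index 2 in
 * g_posix_timers; any value whose upper 16 bits are not TIMER_MAGIC is
 * rejected with -TARGET_EINVAL.
 */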
8051 static target_timer_t get_timer_id(abi_long arg)
8052 {
8053     target_timer_t timerid = arg;
8054 
8055     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8056         return -TARGET_EINVAL;
8057     }
8058 
8059     timerid &= 0xffff;
8060 
8061     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8062         return -TARGET_EINVAL;
8063     }
8064 
8065     return timerid;
8066 }
8067 
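/*
 * Convert a CPU-affinity bitmap from guest memory (an array of abi_ulong
 * words) into a host array of unsigned long, copying bit by bit so that
 * differing word sizes and byte orders are handled correctly.
 */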
8068 static int target_to_host_cpu_mask(unsigned long *host_mask,
8069                                    size_t host_size,
8070                                    abi_ulong target_addr,
8071                                    size_t target_size)
8072 {
8073     unsigned target_bits = sizeof(abi_ulong) * 8;
8074     unsigned host_bits = sizeof(*host_mask) * 8;
8075     abi_ulong *target_mask;
8076     unsigned i, j;
8077 
8078     assert(host_size >= target_size);
8079 
8080     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8081     if (!target_mask) {
8082         return -TARGET_EFAULT;
8083     }
8084     memset(host_mask, 0, host_size);
8085 
8086     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8087         unsigned bit = i * target_bits;
8088         abi_ulong val;
8089 
8090         __get_user(val, &target_mask[i]);
8091         for (j = 0; j < target_bits; j++, bit++) {
8092             if (val & (1UL << j)) {
8093                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8094             }
8095         }
8096     }
8097 
8098     unlock_user(target_mask, target_addr, 0);
8099     return 0;
8100 }
8101 
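/*
 * The reverse of target_to_host_cpu_mask(): copy a host CPU-affinity
 * bitmap back into guest memory, bit by bit.
 */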
8102 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8103                                    size_t host_size,
8104                                    abi_ulong target_addr,
8105                                    size_t target_size)
8106 {
8107     unsigned target_bits = sizeof(abi_ulong) * 8;
8108     unsigned host_bits = sizeof(*host_mask) * 8;
8109     abi_ulong *target_mask;
8110     unsigned i, j;
8111 
8112     assert(host_size >= target_size);
8113 
8114     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8115     if (!target_mask) {
8116         return -TARGET_EFAULT;
8117     }
8118 
8119     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8120         unsigned bit = i * target_bits;
8121         abi_ulong val = 0;
8122 
8123         for (j = 0; j < target_bits; j++, bit++) {
8124             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8125                 val |= 1UL << j;
8126             }
8127         }
8128         __put_user(val, &target_mask[i]);
8129     }
8130 
8131     unlock_user(target_mask, target_addr, target_size);
8132     return 0;
8133 }
8134 
8135 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8136 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8137 #endif
8138 
8139 /* This is an internal helper for do_syscall so that there is a single
8140  * return point, allowing actions such as logging of syscall results
8141  * to be performed in one place.
8142  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8143  */
8144 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8145                             abi_long arg2, abi_long arg3, abi_long arg4,
8146                             abi_long arg5, abi_long arg6, abi_long arg7,
8147                             abi_long arg8)
8148 {
8149     CPUState *cpu = env_cpu(cpu_env);
8150     abi_long ret;
8151 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8152     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8153     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8154     || defined(TARGET_NR_statx)
8155     struct stat st;
8156 #endif
8157 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8158     || defined(TARGET_NR_fstatfs)
8159     struct statfs stfs;
8160 #endif
8161     void *p;
8162 
8163     switch(num) {
8164     case TARGET_NR_exit:
8165         /* In old applications this may be used to implement _exit(2).
8166            However in threaded applications it is used for thread termination,
8167            and _exit_group is used for application termination.
8168            Do thread termination if we have more than one thread.  */
8169 
8170         if (block_signals()) {
8171             return -TARGET_ERESTARTSYS;
8172         }
8173 
8174         pthread_mutex_lock(&clone_lock);
8175 
8176         if (CPU_NEXT(first_cpu)) {
8177             TaskState *ts = cpu->opaque;
8178 
8179             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8180             object_unref(OBJECT(cpu));
8181             /*
8182              * At this point the CPU should be unrealized and removed
8183              * from cpu lists. We can clean-up the rest of the thread
8184              * data without the lock held.
8185              */
8186 
8187             pthread_mutex_unlock(&clone_lock);
8188 
8189             if (ts->child_tidptr) {
8190                 put_user_u32(0, ts->child_tidptr);
8191                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8192                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8193             }
8194             thread_cpu = NULL;
8195             g_free(ts);
8196             rcu_unregister_thread();
8197             pthread_exit(NULL);
8198         }
8199 
8200         pthread_mutex_unlock(&clone_lock);
8201         preexit_cleanup(cpu_env, arg1);
8202         _exit(arg1);
8203         return 0; /* avoid warning */
8204     case TARGET_NR_read:
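        /*
         * A zero-length read from a NULL buffer is valid and must reach
         * the host so that bad descriptors are still reported; lock_user()
         * on the NULL address would otherwise fail and wrongly return
         * -TARGET_EFAULT.
         */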
8205         if (arg2 == 0 && arg3 == 0) {
8206             return get_errno(safe_read(arg1, 0, 0));
8207         } else {
8208             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8209                 return -TARGET_EFAULT;
8210             ret = get_errno(safe_read(arg1, p, arg3));
8211             if (ret >= 0 &&
8212                 fd_trans_host_to_target_data(arg1)) {
8213                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8214             }
8215             unlock_user(p, arg2, ret);
8216         }
8217         return ret;
8218     case TARGET_NR_write:
8219         if (arg2 == 0 && arg3 == 0) {
8220             return get_errno(safe_write(arg1, 0, 0));
8221         }
8222         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8223             return -TARGET_EFAULT;
8224         if (fd_trans_target_to_host_data(arg1)) {
8225             void *copy = g_malloc(arg3);
8226             memcpy(copy, p, arg3);
8227             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8228             if (ret >= 0) {
8229                 ret = get_errno(safe_write(arg1, copy, ret));
8230             }
8231             g_free(copy);
8232         } else {
8233             ret = get_errno(safe_write(arg1, p, arg3));
8234         }
8235         unlock_user(p, arg2, 0);
8236         return ret;
8237 
8238 #ifdef TARGET_NR_open
8239     case TARGET_NR_open:
8240         if (!(p = lock_user_string(arg1)))
8241             return -TARGET_EFAULT;
8242         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8243                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8244                                   arg3));
8245         fd_trans_unregister(ret);
8246         unlock_user(p, arg1, 0);
8247         return ret;
8248 #endif
8249     case TARGET_NR_openat:
8250         if (!(p = lock_user_string(arg2)))
8251             return -TARGET_EFAULT;
8252         ret = get_errno(do_openat(cpu_env, arg1, p,
8253                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8254                                   arg4));
8255         fd_trans_unregister(ret);
8256         unlock_user(p, arg2, 0);
8257         return ret;
8258 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8259     case TARGET_NR_name_to_handle_at:
8260         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8261         return ret;
8262 #endif
8263 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8264     case TARGET_NR_open_by_handle_at:
8265         ret = do_open_by_handle_at(arg1, arg2, arg3);
8266         fd_trans_unregister(ret);
8267         return ret;
8268 #endif
8269     case TARGET_NR_close:
8270         fd_trans_unregister(arg1);
8271         return get_errno(close(arg1));
8272 
8273     case TARGET_NR_brk:
8274         return do_brk(arg1);
8275 #ifdef TARGET_NR_fork
8276     case TARGET_NR_fork:
8277         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8278 #endif
8279 #ifdef TARGET_NR_waitpid
8280     case TARGET_NR_waitpid:
8281         {
8282             int status;
8283             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8284             if (!is_error(ret) && arg2 && ret
8285                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8286                 return -TARGET_EFAULT;
8287         }
8288         return ret;
8289 #endif
8290 #ifdef TARGET_NR_waitid
8291     case TARGET_NR_waitid:
8292         {
8293             siginfo_t info;
8294             info.si_pid = 0;
8295             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8296             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8297                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8298                     return -TARGET_EFAULT;
8299                 host_to_target_siginfo(p, &info);
8300                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8301             }
8302         }
8303         return ret;
8304 #endif
8305 #ifdef TARGET_NR_creat /* not on alpha */
8306     case TARGET_NR_creat:
8307         if (!(p = lock_user_string(arg1)))
8308             return -TARGET_EFAULT;
8309         ret = get_errno(creat(p, arg2));
8310         fd_trans_unregister(ret);
8311         unlock_user(p, arg1, 0);
8312         return ret;
8313 #endif
8314 #ifdef TARGET_NR_link
8315     case TARGET_NR_link:
8316         {
8317             void * p2;
8318             p = lock_user_string(arg1);
8319             p2 = lock_user_string(arg2);
8320             if (!p || !p2)
8321                 ret = -TARGET_EFAULT;
8322             else
8323                 ret = get_errno(link(p, p2));
8324             unlock_user(p2, arg2, 0);
8325             unlock_user(p, arg1, 0);
8326         }
8327         return ret;
8328 #endif
8329 #if defined(TARGET_NR_linkat)
8330     case TARGET_NR_linkat:
8331         {
8332             void * p2 = NULL;
8333             if (!arg2 || !arg4)
8334                 return -TARGET_EFAULT;
8335             p  = lock_user_string(arg2);
8336             p2 = lock_user_string(arg4);
8337             if (!p || !p2)
8338                 ret = -TARGET_EFAULT;
8339             else
8340                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8341             unlock_user(p, arg2, 0);
8342             unlock_user(p2, arg4, 0);
8343         }
8344         return ret;
8345 #endif
8346 #ifdef TARGET_NR_unlink
8347     case TARGET_NR_unlink:
8348         if (!(p = lock_user_string(arg1)))
8349             return -TARGET_EFAULT;
8350         ret = get_errno(unlink(p));
8351         unlock_user(p, arg1, 0);
8352         return ret;
8353 #endif
8354 #if defined(TARGET_NR_unlinkat)
8355     case TARGET_NR_unlinkat:
8356         if (!(p = lock_user_string(arg2)))
8357             return -TARGET_EFAULT;
8358         ret = get_errno(unlinkat(arg1, p, arg3));
8359         unlock_user(p, arg2, 0);
8360         return ret;
8361 #endif
8362     case TARGET_NR_execve:
8363         {
8364             char **argp, **envp;
8365             int argc, envc;
8366             abi_ulong gp;
8367             abi_ulong guest_argp;
8368             abi_ulong guest_envp;
8369             abi_ulong addr;
8370             char **q;
8371 
8372             argc = 0;
8373             guest_argp = arg2;
8374             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8375                 if (get_user_ual(addr, gp))
8376                     return -TARGET_EFAULT;
8377                 if (!addr)
8378                     break;
8379                 argc++;
8380             }
8381             envc = 0;
8382             guest_envp = arg3;
8383             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8384                 if (get_user_ual(addr, gp))
8385                     return -TARGET_EFAULT;
8386                 if (!addr)
8387                     break;
8388                 envc++;
8389             }
8390 
8391             argp = g_new0(char *, argc + 1);
8392             envp = g_new0(char *, envc + 1);
8393 
8394             for (gp = guest_argp, q = argp; gp;
8395                   gp += sizeof(abi_ulong), q++) {
8396                 if (get_user_ual(addr, gp))
8397                     goto execve_efault;
8398                 if (!addr)
8399                     break;
8400                 if (!(*q = lock_user_string(addr)))
8401                     goto execve_efault;
8402             }
8403             *q = NULL;
8404 
8405             for (gp = guest_envp, q = envp; gp;
8406                   gp += sizeof(abi_ulong), q++) {
8407                 if (get_user_ual(addr, gp))
8408                     goto execve_efault;
8409                 if (!addr)
8410                     break;
8411                 if (!(*q = lock_user_string(addr)))
8412                     goto execve_efault;
8413             }
8414             *q = NULL;
8415 
8416             if (!(p = lock_user_string(arg1)))
8417                 goto execve_efault;
8418             /* Although execve() is not an interruptible syscall it is
8419              * a special case where we must use the safe_syscall wrapper:
8420              * if we allow a signal to happen before we make the host
8421              * syscall then we will 'lose' it, because at the point of
8422              * execve the process leaves QEMU's control. So we use the
8423              * safe syscall wrapper to ensure that we either take the
8424              * signal as a guest signal, or else it does not happen
8425              * before the execve completes and makes it the other
8426              * program's problem.
8427              */
8428             ret = get_errno(safe_execve(p, argp, envp));
8429             unlock_user(p, arg1, 0);
8430 
8431             goto execve_end;
8432 
8433         execve_efault:
8434             ret = -TARGET_EFAULT;
8435 
8436         execve_end:
8437             for (gp = guest_argp, q = argp; *q;
8438                   gp += sizeof(abi_ulong), q++) {
8439                 if (get_user_ual(addr, gp)
8440                     || !addr)
8441                     break;
8442                 unlock_user(*q, addr, 0);
8443             }
8444             for (gp = guest_envp, q = envp; *q;
8445                   gp += sizeof(abi_ulong), q++) {
8446                 if (get_user_ual(addr, gp)
8447                     || !addr)
8448                     break;
8449                 unlock_user(*q, addr, 0);
8450             }
8451 
8452             g_free(argp);
8453             g_free(envp);
8454         }
8455         return ret;
8456     case TARGET_NR_chdir:
8457         if (!(p = lock_user_string(arg1)))
8458             return -TARGET_EFAULT;
8459         ret = get_errno(chdir(p));
8460         unlock_user(p, arg1, 0);
8461         return ret;
8462 #ifdef TARGET_NR_time
8463     case TARGET_NR_time:
8464         {
8465             time_t host_time;
8466             ret = get_errno(time(&host_time));
8467             if (!is_error(ret)
8468                 && arg1
8469                 && put_user_sal(host_time, arg1))
8470                 return -TARGET_EFAULT;
8471         }
8472         return ret;
8473 #endif
8474 #ifdef TARGET_NR_mknod
8475     case TARGET_NR_mknod:
8476         if (!(p = lock_user_string(arg1)))
8477             return -TARGET_EFAULT;
8478         ret = get_errno(mknod(p, arg2, arg3));
8479         unlock_user(p, arg1, 0);
8480         return ret;
8481 #endif
8482 #if defined(TARGET_NR_mknodat)
8483     case TARGET_NR_mknodat:
8484         if (!(p = lock_user_string(arg2)))
8485             return -TARGET_EFAULT;
8486         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8487         unlock_user(p, arg2, 0);
8488         return ret;
8489 #endif
8490 #ifdef TARGET_NR_chmod
8491     case TARGET_NR_chmod:
8492         if (!(p = lock_user_string(arg1)))
8493             return -TARGET_EFAULT;
8494         ret = get_errno(chmod(p, arg2));
8495         unlock_user(p, arg1, 0);
8496         return ret;
8497 #endif
8498 #ifdef TARGET_NR_lseek
8499     case TARGET_NR_lseek:
8500         return get_errno(lseek(arg1, arg2, arg3));
8501 #endif
8502 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8503     /* Alpha specific */
8504     case TARGET_NR_getxpid:
8505         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8506         return get_errno(getpid());
8507 #endif
8508 #ifdef TARGET_NR_getpid
8509     case TARGET_NR_getpid:
8510         return get_errno(getpid());
8511 #endif
8512     case TARGET_NR_mount:
8513         {
8514             /* need to look at the data field */
8515             void *p2, *p3;
8516 
8517             if (arg1) {
8518                 p = lock_user_string(arg1);
8519                 if (!p) {
8520                     return -TARGET_EFAULT;
8521                 }
8522             } else {
8523                 p = NULL;
8524             }
8525 
8526             p2 = lock_user_string(arg2);
8527             if (!p2) {
8528                 if (arg1) {
8529                     unlock_user(p, arg1, 0);
8530                 }
8531                 return -TARGET_EFAULT;
8532             }
8533 
8534             if (arg3) {
8535                 p3 = lock_user_string(arg3);
8536                 if (!p3) {
8537                     if (arg1) {
8538                         unlock_user(p, arg1, 0);
8539                     }
8540                     unlock_user(p2, arg2, 0);
8541                     return -TARGET_EFAULT;
8542                 }
8543             } else {
8544                 p3 = NULL;
8545             }
8546 
8547             /* FIXME - arg5 should be locked, but it isn't clear how to
8548              * do that since it's not guaranteed to be a NULL-terminated
8549              * string.
8550              */
8551             if (!arg5) {
8552                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8553             } else {
8554                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8555             }
8556             ret = get_errno(ret);
8557 
8558             if (arg1) {
8559                 unlock_user(p, arg1, 0);
8560             }
8561             unlock_user(p2, arg2, 0);
8562             if (arg3) {
8563                 unlock_user(p3, arg3, 0);
8564             }
8565         }
8566         return ret;
8567 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8568 #if defined(TARGET_NR_umount)
8569     case TARGET_NR_umount:
8570 #endif
8571 #if defined(TARGET_NR_oldumount)
8572     case TARGET_NR_oldumount:
8573 #endif
8574         if (!(p = lock_user_string(arg1)))
8575             return -TARGET_EFAULT;
8576         ret = get_errno(umount(p));
8577         unlock_user(p, arg1, 0);
8578         return ret;
8579 #endif
8580 #ifdef TARGET_NR_stime /* not on alpha */
8581     case TARGET_NR_stime:
8582         {
8583             struct timespec ts;
8584             ts.tv_nsec = 0;
8585             if (get_user_sal(ts.tv_sec, arg1)) {
8586                 return -TARGET_EFAULT;
8587             }
8588             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8589         }
8590 #endif
8591 #ifdef TARGET_NR_alarm /* not on alpha */
8592     case TARGET_NR_alarm:
8593         return alarm(arg1);
8594 #endif
8595 #ifdef TARGET_NR_pause /* not on alpha */
8596     case TARGET_NR_pause:
8597         if (!block_signals()) {
8598             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8599         }
8600         return -TARGET_EINTR;
8601 #endif
8602 #ifdef TARGET_NR_utime
8603     case TARGET_NR_utime:
8604         {
8605             struct utimbuf tbuf, *host_tbuf;
8606             struct target_utimbuf *target_tbuf;
8607             if (arg2) {
8608                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8609                     return -TARGET_EFAULT;
8610                 tbuf.actime = tswapal(target_tbuf->actime);
8611                 tbuf.modtime = tswapal(target_tbuf->modtime);
8612                 unlock_user_struct(target_tbuf, arg2, 0);
8613                 host_tbuf = &tbuf;
8614             } else {
8615                 host_tbuf = NULL;
8616             }
8617             if (!(p = lock_user_string(arg1)))
8618                 return -TARGET_EFAULT;
8619             ret = get_errno(utime(p, host_tbuf));
8620             unlock_user(p, arg1, 0);
8621         }
8622         return ret;
8623 #endif
8624 #ifdef TARGET_NR_utimes
8625     case TARGET_NR_utimes:
8626         {
8627             struct timeval *tvp, tv[2];
8628             if (arg2) {
8629                 if (copy_from_user_timeval(&tv[0], arg2)
8630                     || copy_from_user_timeval(&tv[1],
8631                                               arg2 + sizeof(struct target_timeval)))
8632                     return -TARGET_EFAULT;
8633                 tvp = tv;
8634             } else {
8635                 tvp = NULL;
8636             }
8637             if (!(p = lock_user_string(arg1)))
8638                 return -TARGET_EFAULT;
8639             ret = get_errno(utimes(p, tvp));
8640             unlock_user(p, arg1, 0);
8641         }
8642         return ret;
8643 #endif
8644 #if defined(TARGET_NR_futimesat)
8645     case TARGET_NR_futimesat:
8646         {
8647             struct timeval *tvp, tv[2];
8648             if (arg3) {
8649                 if (copy_from_user_timeval(&tv[0], arg3)
8650                     || copy_from_user_timeval(&tv[1],
8651                                               arg3 + sizeof(struct target_timeval)))
8652                     return -TARGET_EFAULT;
8653                 tvp = tv;
8654             } else {
8655                 tvp = NULL;
8656             }
8657             if (!(p = lock_user_string(arg2))) {
8658                 return -TARGET_EFAULT;
8659             }
8660             ret = get_errno(futimesat(arg1, path(p), tvp));
8661             unlock_user(p, arg2, 0);
8662         }
8663         return ret;
8664 #endif
8665 #ifdef TARGET_NR_access
8666     case TARGET_NR_access:
8667         if (!(p = lock_user_string(arg1))) {
8668             return -TARGET_EFAULT;
8669         }
8670         ret = get_errno(access(path(p), arg2));
8671         unlock_user(p, arg1, 0);
8672         return ret;
8673 #endif
8674 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8675     case TARGET_NR_faccessat:
8676         if (!(p = lock_user_string(arg2))) {
8677             return -TARGET_EFAULT;
8678         }
8679         ret = get_errno(faccessat(arg1, p, arg3, 0));
8680         unlock_user(p, arg2, 0);
8681         return ret;
8682 #endif
8683 #ifdef TARGET_NR_nice /* not on alpha */
8684     case TARGET_NR_nice:
8685         return get_errno(nice(arg1));
8686 #endif
8687     case TARGET_NR_sync:
8688         sync();
8689         return 0;
8690 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8691     case TARGET_NR_syncfs:
8692         return get_errno(syncfs(arg1));
8693 #endif
8694     case TARGET_NR_kill:
8695         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8696 #ifdef TARGET_NR_rename
8697     case TARGET_NR_rename:
8698         {
8699             void *p2;
8700             p = lock_user_string(arg1);
8701             p2 = lock_user_string(arg2);
8702             if (!p || !p2)
8703                 ret = -TARGET_EFAULT;
8704             else
8705                 ret = get_errno(rename(p, p2));
8706             unlock_user(p2, arg2, 0);
8707             unlock_user(p, arg1, 0);
8708         }
8709         return ret;
8710 #endif
8711 #if defined(TARGET_NR_renameat)
8712     case TARGET_NR_renameat:
8713         {
8714             void *p2;
8715             p  = lock_user_string(arg2);
8716             p2 = lock_user_string(arg4);
8717             if (!p || !p2)
8718                 ret = -TARGET_EFAULT;
8719             else
8720                 ret = get_errno(renameat(arg1, p, arg3, p2));
8721             unlock_user(p2, arg4, 0);
8722             unlock_user(p, arg2, 0);
8723         }
8724         return ret;
8725 #endif
8726 #if defined(TARGET_NR_renameat2)
8727     case TARGET_NR_renameat2:
8728         {
8729             void *p2;
8730             p  = lock_user_string(arg2);
8731             p2 = lock_user_string(arg4);
8732             if (!p || !p2) {
8733                 ret = -TARGET_EFAULT;
8734             } else {
8735                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8736             }
8737             unlock_user(p2, arg4, 0);
8738             unlock_user(p, arg2, 0);
8739         }
8740         return ret;
8741 #endif
8742 #ifdef TARGET_NR_mkdir
8743     case TARGET_NR_mkdir:
8744         if (!(p = lock_user_string(arg1)))
8745             return -TARGET_EFAULT;
8746         ret = get_errno(mkdir(p, arg2));
8747         unlock_user(p, arg1, 0);
8748         return ret;
8749 #endif
8750 #if defined(TARGET_NR_mkdirat)
8751     case TARGET_NR_mkdirat:
8752         if (!(p = lock_user_string(arg2)))
8753             return -TARGET_EFAULT;
8754         ret = get_errno(mkdirat(arg1, p, arg3));
8755         unlock_user(p, arg2, 0);
8756         return ret;
8757 #endif
8758 #ifdef TARGET_NR_rmdir
8759     case TARGET_NR_rmdir:
8760         if (!(p = lock_user_string(arg1)))
8761             return -TARGET_EFAULT;
8762         ret = get_errno(rmdir(p));
8763         unlock_user(p, arg1, 0);
8764         return ret;
8765 #endif
8766     case TARGET_NR_dup:
8767         ret = get_errno(dup(arg1));
8768         if (ret >= 0) {
8769             fd_trans_dup(arg1, ret);
8770         }
8771         return ret;
8772 #ifdef TARGET_NR_pipe
8773     case TARGET_NR_pipe:
8774         return do_pipe(cpu_env, arg1, 0, 0);
8775 #endif
8776 #ifdef TARGET_NR_pipe2
8777     case TARGET_NR_pipe2:
8778         return do_pipe(cpu_env, arg1,
8779                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8780 #endif
8781     case TARGET_NR_times:
8782         {
8783             struct target_tms *tmsp;
8784             struct tms tms;
8785             ret = get_errno(times(&tms));
8786             if (arg1) {
8787                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8788                 if (!tmsp)
8789                     return -TARGET_EFAULT;
8790                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8791                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8792                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8793                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8794             }
8795             if (!is_error(ret))
8796                 ret = host_to_target_clock_t(ret);
8797         }
8798         return ret;
8799     case TARGET_NR_acct:
8800         if (arg1 == 0) {
8801             ret = get_errno(acct(NULL));
8802         } else {
8803             if (!(p = lock_user_string(arg1))) {
8804                 return -TARGET_EFAULT;
8805             }
8806             ret = get_errno(acct(path(p)));
8807             unlock_user(p, arg1, 0);
8808         }
8809         return ret;
8810 #ifdef TARGET_NR_umount2
8811     case TARGET_NR_umount2:
8812         if (!(p = lock_user_string(arg1)))
8813             return -TARGET_EFAULT;
8814         ret = get_errno(umount2(p, arg2));
8815         unlock_user(p, arg1, 0);
8816         return ret;
8817 #endif
8818     case TARGET_NR_ioctl:
8819         return do_ioctl(arg1, arg2, arg3);
8820 #ifdef TARGET_NR_fcntl
8821     case TARGET_NR_fcntl:
8822         return do_fcntl(arg1, arg2, arg3);
8823 #endif
8824     case TARGET_NR_setpgid:
8825         return get_errno(setpgid(arg1, arg2));
8826     case TARGET_NR_umask:
8827         return get_errno(umask(arg1));
8828     case TARGET_NR_chroot:
8829         if (!(p = lock_user_string(arg1)))
8830             return -TARGET_EFAULT;
8831         ret = get_errno(chroot(p));
8832         unlock_user(p, arg1, 0);
8833         return ret;
8834 #ifdef TARGET_NR_dup2
8835     case TARGET_NR_dup2:
8836         ret = get_errno(dup2(arg1, arg2));
8837         if (ret >= 0) {
8838             fd_trans_dup(arg1, arg2);
8839         }
8840         return ret;
8841 #endif
8842 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8843     case TARGET_NR_dup3:
8844     {
8845         int host_flags;
8846 
8847         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8848             return -TARGET_EINVAL;
8849         }
8850         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8851         ret = get_errno(dup3(arg1, arg2, host_flags));
8852         if (ret >= 0) {
8853             fd_trans_dup(arg1, arg2);
8854         }
8855         return ret;
8856     }
8857 #endif
8858 #ifdef TARGET_NR_getppid /* not on alpha */
8859     case TARGET_NR_getppid:
8860         return get_errno(getppid());
8861 #endif
8862 #ifdef TARGET_NR_getpgrp
8863     case TARGET_NR_getpgrp:
8864         return get_errno(getpgrp());
8865 #endif
8866     case TARGET_NR_setsid:
8867         return get_errno(setsid());
8868 #ifdef TARGET_NR_sigaction
8869     case TARGET_NR_sigaction:
8870         {
8871 #if defined(TARGET_MIPS)
8872 	    struct target_sigaction act, oact, *pact, *old_act;
8873 
8874 	    if (arg2) {
8875                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8876                     return -TARGET_EFAULT;
8877 		act._sa_handler = old_act->_sa_handler;
8878 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8879 		act.sa_flags = old_act->sa_flags;
8880 		unlock_user_struct(old_act, arg2, 0);
8881 		pact = &act;
8882 	    } else {
8883 		pact = NULL;
8884 	    }
8885 
8886         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8887 
8888 	    if (!is_error(ret) && arg3) {
8889                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8890                     return -TARGET_EFAULT;
8891 		old_act->_sa_handler = oact._sa_handler;
8892 		old_act->sa_flags = oact.sa_flags;
8893 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8894 		old_act->sa_mask.sig[1] = 0;
8895 		old_act->sa_mask.sig[2] = 0;
8896 		old_act->sa_mask.sig[3] = 0;
8897 		unlock_user_struct(old_act, arg3, 1);
8898 	    }
8899 #else
8900             struct target_old_sigaction *old_act;
8901             struct target_sigaction act, oact, *pact;
8902             if (arg2) {
8903                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8904                     return -TARGET_EFAULT;
8905                 act._sa_handler = old_act->_sa_handler;
8906                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8907                 act.sa_flags = old_act->sa_flags;
8908 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8909                 act.sa_restorer = old_act->sa_restorer;
8910 #endif
8911                 unlock_user_struct(old_act, arg2, 0);
8912                 pact = &act;
8913             } else {
8914                 pact = NULL;
8915             }
8916             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8917             if (!is_error(ret) && arg3) {
8918                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8919                     return -TARGET_EFAULT;
8920                 old_act->_sa_handler = oact._sa_handler;
8921                 old_act->sa_mask = oact.sa_mask.sig[0];
8922                 old_act->sa_flags = oact.sa_flags;
8923 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8924                 old_act->sa_restorer = oact.sa_restorer;
8925 #endif
8926                 unlock_user_struct(old_act, arg3, 1);
8927             }
8928 #endif
8929         }
8930         return ret;
8931 #endif
8932     case TARGET_NR_rt_sigaction:
8933         {
8934             /*
8935              * For Alpha and SPARC this is a 5 argument syscall, with
8936              * a 'restorer' parameter which must be copied into the
8937              * sa_restorer field of the sigaction struct.
8938              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8939              * and arg5 is the sigsetsize.
8940              */
8941 #if defined(TARGET_ALPHA)
8942             target_ulong sigsetsize = arg4;
8943             target_ulong restorer = arg5;
8944 #elif defined(TARGET_SPARC)
8945             target_ulong restorer = arg4;
8946             target_ulong sigsetsize = arg5;
8947 #else
8948             target_ulong sigsetsize = arg4;
8949             target_ulong restorer = 0;
8950 #endif
8951             struct target_sigaction *act = NULL;
8952             struct target_sigaction *oact = NULL;
8953 
8954             if (sigsetsize != sizeof(target_sigset_t)) {
8955                 return -TARGET_EINVAL;
8956             }
8957             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8958                 return -TARGET_EFAULT;
8959             }
8960             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8961                 ret = -TARGET_EFAULT;
8962             } else {
8963                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8964                 if (oact) {
8965                     unlock_user_struct(oact, arg3, 1);
8966                 }
8967             }
8968             if (act) {
8969                 unlock_user_struct(act, arg2, 0);
8970             }
8971         }
8972         return ret;
8973 #ifdef TARGET_NR_sgetmask /* not on alpha */
8974     case TARGET_NR_sgetmask:
8975         {
8976             sigset_t cur_set;
8977             abi_ulong target_set;
8978             ret = do_sigprocmask(0, NULL, &cur_set);
8979             if (!ret) {
8980                 host_to_target_old_sigset(&target_set, &cur_set);
8981                 ret = target_set;
8982             }
8983         }
8984         return ret;
8985 #endif
8986 #ifdef TARGET_NR_ssetmask /* not on alpha */
8987     case TARGET_NR_ssetmask:
8988         {
8989             sigset_t set, oset;
8990             abi_ulong target_set = arg1;
8991             target_to_host_old_sigset(&set, &target_set);
8992             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8993             if (!ret) {
8994                 host_to_target_old_sigset(&target_set, &oset);
8995                 ret = target_set;
8996             }
8997         }
8998         return ret;
8999 #endif
9000 #ifdef TARGET_NR_sigprocmask
9001     case TARGET_NR_sigprocmask:
9002         {
9003 #if defined(TARGET_ALPHA)
9004             sigset_t set, oldset;
9005             abi_ulong mask;
9006             int how;
9007 
9008             switch (arg1) {
9009             case TARGET_SIG_BLOCK:
9010                 how = SIG_BLOCK;
9011                 break;
9012             case TARGET_SIG_UNBLOCK:
9013                 how = SIG_UNBLOCK;
9014                 break;
9015             case TARGET_SIG_SETMASK:
9016                 how = SIG_SETMASK;
9017                 break;
9018             default:
9019                 return -TARGET_EINVAL;
9020             }
9021             mask = arg2;
9022             target_to_host_old_sigset(&set, &mask);
9023 
9024             ret = do_sigprocmask(how, &set, &oldset);
9025             if (!is_error(ret)) {
9026                 host_to_target_old_sigset(&mask, &oldset);
9027                 ret = mask;
9028                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9029             }
9030 #else
9031             sigset_t set, oldset, *set_ptr;
9032             int how;
9033 
9034             if (arg2) {
9035                 switch (arg1) {
9036                 case TARGET_SIG_BLOCK:
9037                     how = SIG_BLOCK;
9038                     break;
9039                 case TARGET_SIG_UNBLOCK:
9040                     how = SIG_UNBLOCK;
9041                     break;
9042                 case TARGET_SIG_SETMASK:
9043                     how = SIG_SETMASK;
9044                     break;
9045                 default:
9046                     return -TARGET_EINVAL;
9047                 }
9048                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9049                     return -TARGET_EFAULT;
9050                 target_to_host_old_sigset(&set, p);
9051                 unlock_user(p, arg2, 0);
9052                 set_ptr = &set;
9053             } else {
9054                 how = 0;
9055                 set_ptr = NULL;
9056             }
9057             ret = do_sigprocmask(how, set_ptr, &oldset);
9058             if (!is_error(ret) && arg3) {
9059                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9060                     return -TARGET_EFAULT;
9061                 host_to_target_old_sigset(p, &oldset);
9062                 unlock_user(p, arg3, sizeof(target_sigset_t));
9063             }
9064 #endif
9065         }
9066         return ret;
9067 #endif
9068     case TARGET_NR_rt_sigprocmask:
9069         {
9070             int how = arg1;
9071             sigset_t set, oldset, *set_ptr;
9072 
9073             if (arg4 != sizeof(target_sigset_t)) {
9074                 return -TARGET_EINVAL;
9075             }
9076 
9077             if (arg2) {
9078                 switch(how) {
9079                 case TARGET_SIG_BLOCK:
9080                     how = SIG_BLOCK;
9081                     break;
9082                 case TARGET_SIG_UNBLOCK:
9083                     how = SIG_UNBLOCK;
9084                     break;
9085                 case TARGET_SIG_SETMASK:
9086                     how = SIG_SETMASK;
9087                     break;
9088                 default:
9089                     return -TARGET_EINVAL;
9090                 }
9091                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9092                     return -TARGET_EFAULT;
9093                 target_to_host_sigset(&set, p);
9094                 unlock_user(p, arg2, 0);
9095                 set_ptr = &set;
9096             } else {
9097                 how = 0;
9098                 set_ptr = NULL;
9099             }
9100             ret = do_sigprocmask(how, set_ptr, &oldset);
9101             if (!is_error(ret) && arg3) {
9102                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9103                     return -TARGET_EFAULT;
9104                 host_to_target_sigset(p, &oldset);
9105                 unlock_user(p, arg3, sizeof(target_sigset_t));
9106             }
9107         }
9108         return ret;
9109 #ifdef TARGET_NR_sigpending
9110     case TARGET_NR_sigpending:
9111         {
9112             sigset_t set;
9113             ret = get_errno(sigpending(&set));
9114             if (!is_error(ret)) {
9115                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9116                     return -TARGET_EFAULT;
9117                 host_to_target_old_sigset(p, &set);
9118                 unlock_user(p, arg1, sizeof(target_sigset_t));
9119             }
9120         }
9121         return ret;
9122 #endif
9123     case TARGET_NR_rt_sigpending:
9124         {
9125             sigset_t set;
9126 
9127             /* Yes, this check is >, not != like most.  We follow the kernel's
9128              * logic here: the kernel implements NR_sigpending through the
9129              * same code path, and in that case the old_sigset_t it accepts
9130              * is smaller in size.
9131              */
9132             if (arg2 > sizeof(target_sigset_t)) {
9133                 return -TARGET_EINVAL;
9134             }
9135 
9136             ret = get_errno(sigpending(&set));
9137             if (!is_error(ret)) {
9138                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9139                     return -TARGET_EFAULT;
9140                 host_to_target_sigset(p, &set);
9141                 unlock_user(p, arg1, sizeof(target_sigset_t));
9142             }
9143         }
9144         return ret;
9145 #ifdef TARGET_NR_sigsuspend
9146     case TARGET_NR_sigsuspend:
9147         {
9148             TaskState *ts = cpu->opaque;
9149 #if defined(TARGET_ALPHA)
9150             abi_ulong mask = arg1;
9151             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9152 #else
9153             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9154                 return -TARGET_EFAULT;
9155             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9156             unlock_user(p, arg1, 0);
9157 #endif
9158             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9159                                                SIGSET_T_SIZE));
9160             if (ret != -TARGET_ERESTARTSYS) {
9161                 ts->in_sigsuspend = 1;
9162             }
9163         }
9164         return ret;
9165 #endif
9166     case TARGET_NR_rt_sigsuspend:
9167         {
9168             TaskState *ts = cpu->opaque;
9169 
9170             if (arg2 != sizeof(target_sigset_t)) {
9171                 return -TARGET_EINVAL;
9172             }
9173             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9174                 return -TARGET_EFAULT;
9175             target_to_host_sigset(&ts->sigsuspend_mask, p);
9176             unlock_user(p, arg1, 0);
9177             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9178                                                SIGSET_T_SIZE));
9179             if (ret != -TARGET_ERESTARTSYS) {
9180                 ts->in_sigsuspend = 1;
9181             }
9182         }
9183         return ret;
9184 #ifdef TARGET_NR_rt_sigtimedwait
9185     case TARGET_NR_rt_sigtimedwait:
9186         {
9187             sigset_t set;
9188             struct timespec uts, *puts;
9189             siginfo_t uinfo;
9190 
9191             if (arg4 != sizeof(target_sigset_t)) {
9192                 return -TARGET_EINVAL;
9193             }
9194 
9195             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9196                 return -TARGET_EFAULT;
9197             target_to_host_sigset(&set, p);
9198             unlock_user(p, arg1, 0);
9199             if (arg3) {
9200                 puts = &uts;
9201                 if (target_to_host_timespec(puts, arg3)) {
9202                     return -TARGET_EFAULT;
9203                 }
9204             } else {
9205                 puts = NULL;
9206             }
9207             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9208                                                  SIGSET_T_SIZE));
9209             if (!is_error(ret)) {
9210                 if (arg2) {
9211                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9212                                   0);
9213                     if (!p) {
9214                         return -TARGET_EFAULT;
9215                     }
9216                     host_to_target_siginfo(p, &uinfo);
9217                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9218                 }
9219                 ret = host_to_target_signal(ret);
9220             }
9221         }
9222         return ret;
9223 #endif
9224 #ifdef TARGET_NR_rt_sigtimedwait_time64
9225     case TARGET_NR_rt_sigtimedwait_time64:
9226         {
9227             sigset_t set;
9228             struct timespec uts, *puts;
9229             siginfo_t uinfo;
9230 
9231             if (arg4 != sizeof(target_sigset_t)) {
9232                 return -TARGET_EINVAL;
9233             }
9234 
9235             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9236             if (!p) {
9237                 return -TARGET_EFAULT;
9238             }
9239             target_to_host_sigset(&set, p);
9240             unlock_user(p, arg1, 0);
9241             if (arg3) {
9242                 puts = &uts;
9243                 if (target_to_host_timespec64(puts, arg3)) {
9244                     return -TARGET_EFAULT;
9245                 }
9246             } else {
9247                 puts = NULL;
9248             }
9249             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9250                                                  SIGSET_T_SIZE));
9251             if (!is_error(ret)) {
9252                 if (arg2) {
9253                     p = lock_user(VERIFY_WRITE, arg2,
9254                                   sizeof(target_siginfo_t), 0);
9255                     if (!p) {
9256                         return -TARGET_EFAULT;
9257                     }
9258                     host_to_target_siginfo(p, &uinfo);
9259                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9260                 }
9261                 ret = host_to_target_signal(ret);
9262             }
9263         }
9264         return ret;
9265 #endif
9266     case TARGET_NR_rt_sigqueueinfo:
9267         {
9268             siginfo_t uinfo;
9269 
9270             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9271             if (!p) {
9272                 return -TARGET_EFAULT;
9273             }
9274             target_to_host_siginfo(&uinfo, p);
9275             unlock_user(p, arg3, 0);
9276             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9277         }
9278         return ret;
9279     case TARGET_NR_rt_tgsigqueueinfo:
9280         {
9281             siginfo_t uinfo;
9282 
9283             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9284             if (!p) {
9285                 return -TARGET_EFAULT;
9286             }
9287             target_to_host_siginfo(&uinfo, p);
9288             unlock_user(p, arg4, 0);
9289             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9290         }
9291         return ret;
9292 #ifdef TARGET_NR_sigreturn
9293     case TARGET_NR_sigreturn:
9294         if (block_signals()) {
9295             return -TARGET_ERESTARTSYS;
9296         }
9297         return do_sigreturn(cpu_env);
9298 #endif
9299     case TARGET_NR_rt_sigreturn:
9300         if (block_signals()) {
9301             return -TARGET_ERESTARTSYS;
9302         }
9303         return do_rt_sigreturn(cpu_env);
9304     case TARGET_NR_sethostname:
9305         if (!(p = lock_user_string(arg1)))
9306             return -TARGET_EFAULT;
9307         ret = get_errno(sethostname(p, arg2));
9308         unlock_user(p, arg1, 0);
9309         return ret;
9310 #ifdef TARGET_NR_setrlimit
9311     case TARGET_NR_setrlimit:
9312         {
9313             int resource = target_to_host_resource(arg1);
9314             struct target_rlimit *target_rlim;
9315             struct rlimit rlim;
9316             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9317                 return -TARGET_EFAULT;
9318             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9319             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9320             unlock_user_struct(target_rlim, arg2, 0);
9321             /*
9322              * If we just passed through resource limit settings for memory then
9323              * they would also apply to QEMU's own allocations, and QEMU will
9324              * crash or hang or die if its allocations fail. Ideally we would
9325              * track the guest allocations in QEMU and apply the limits ourselves.
9326              * For now, just tell the guest the call succeeded but don't actually
9327              * limit anything.
9328              */
9329             if (resource != RLIMIT_AS &&
9330                 resource != RLIMIT_DATA &&
9331                 resource != RLIMIT_STACK) {
9332                 return get_errno(setrlimit(resource, &rlim));
9333             } else {
9334                 return 0;
9335             }
9336         }
9337 #endif
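    /*
     * Guest-side sketch of the behaviour implemented above (illustrative
     * only): a guest that sets one of the memory limits sees the call
     * succeed, but nothing is actually limited, while other resources are
     * passed through to the host.
     *
     *     struct rlimit rl = { 64 << 20, 64 << 20 };
     *     setrlimit(RLIMIT_AS, &rl);     // reported as success, not enforced
     *     setrlimit(RLIMIT_NOFILE, &rl); // forwarded to the host setrlimit()
     */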
9338 #ifdef TARGET_NR_getrlimit
9339     case TARGET_NR_getrlimit:
9340         {
9341             int resource = target_to_host_resource(arg1);
9342             struct target_rlimit *target_rlim;
9343             struct rlimit rlim;
9344 
9345             ret = get_errno(getrlimit(resource, &rlim));
9346             if (!is_error(ret)) {
9347                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9348                     return -TARGET_EFAULT;
9349                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9350                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9351                 unlock_user_struct(target_rlim, arg2, 1);
9352             }
9353         }
9354         return ret;
9355 #endif
9356     case TARGET_NR_getrusage:
9357         {
9358             struct rusage rusage;
9359             ret = get_errno(getrusage(arg1, &rusage));
9360             if (!is_error(ret)) {
9361                 ret = host_to_target_rusage(arg2, &rusage);
9362             }
9363         }
9364         return ret;
9365 #if defined(TARGET_NR_gettimeofday)
9366     case TARGET_NR_gettimeofday:
9367         {
9368             struct timeval tv;
9369             struct timezone tz;
9370 
9371             ret = get_errno(gettimeofday(&tv, &tz));
9372             if (!is_error(ret)) {
9373                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9374                     return -TARGET_EFAULT;
9375                 }
9376                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9377                     return -TARGET_EFAULT;
9378                 }
9379             }
9380         }
9381         return ret;
9382 #endif
9383 #if defined(TARGET_NR_settimeofday)
9384     case TARGET_NR_settimeofday:
9385         {
9386             struct timeval tv, *ptv = NULL;
9387             struct timezone tz, *ptz = NULL;
9388 
9389             if (arg1) {
9390                 if (copy_from_user_timeval(&tv, arg1)) {
9391                     return -TARGET_EFAULT;
9392                 }
9393                 ptv = &tv;
9394             }
9395 
9396             if (arg2) {
9397                 if (copy_from_user_timezone(&tz, arg2)) {
9398                     return -TARGET_EFAULT;
9399                 }
9400                 ptz = &tz;
9401             }
9402 
9403             return get_errno(settimeofday(ptv, ptz));
9404         }
9405 #endif
9406 #if defined(TARGET_NR_select)
9407     case TARGET_NR_select:
9408 #if defined(TARGET_WANT_NI_OLD_SELECT)
9409         /* some architectures used to have old_select here
9410          * but now return ENOSYS for it.
9411          */
9412         ret = -TARGET_ENOSYS;
9413 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9414         ret = do_old_select(arg1);
9415 #else
9416         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9417 #endif
9418         return ret;
9419 #endif
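    /*
     * For the TARGET_WANT_OLD_SYS_SELECT path above, arg1 is not an fd count
     * but a guest pointer to a single argument block, in the style of the
     * kernel's historical sel_arg_struct (sketch, guest-pointer fields shown
     * as abi_ulong):
     *
     *     struct sel_arg_struct {
     *         abi_ulong n;                  // highest fd + 1
     *         abi_ulong inp, outp, exp;     // guest fd_set pointers
     *         abi_ulong tvp;                // guest struct timeval pointer
     *     };
     *
     * do_old_select() unpacks this block before doing the usual select
     * conversion.
     */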
9420 #ifdef TARGET_NR_pselect6
9421     case TARGET_NR_pselect6:
9422         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9423 #endif
9424 #ifdef TARGET_NR_pselect6_time64
9425     case TARGET_NR_pselect6_time64:
9426         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9427 #endif
9428 #ifdef TARGET_NR_symlink
9429     case TARGET_NR_symlink:
9430         {
9431             void *p2;
9432             p = lock_user_string(arg1);
9433             p2 = lock_user_string(arg2);
9434             if (!p || !p2)
9435                 ret = -TARGET_EFAULT;
9436             else
9437                 ret = get_errno(symlink(p, p2));
9438             unlock_user(p2, arg2, 0);
9439             unlock_user(p, arg1, 0);
9440         }
9441         return ret;
9442 #endif
9443 #if defined(TARGET_NR_symlinkat)
9444     case TARGET_NR_symlinkat:
9445         {
9446             void *p2;
9447             p  = lock_user_string(arg1);
9448             p2 = lock_user_string(arg3);
9449             if (!p || !p2)
9450                 ret = -TARGET_EFAULT;
9451             else
9452                 ret = get_errno(symlinkat(p, arg2, p2));
9453             unlock_user(p2, arg3, 0);
9454             unlock_user(p, arg1, 0);
9455         }
9456         return ret;
9457 #endif
9458 #ifdef TARGET_NR_readlink
9459     case TARGET_NR_readlink:
9460         {
9461             void *p2;
9462             p = lock_user_string(arg1);
9463             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9464             if (!p || !p2) {
9465                 ret = -TARGET_EFAULT;
9466             } else if (!arg3) {
9467                 /* Short circuit this for the magic exe check. */
9468                 ret = -TARGET_EINVAL;
9469             } else if (is_proc_myself((const char *)p, "exe")) {
9470                 char real[PATH_MAX], *temp;
9471                 temp = realpath(exec_path, real);
9472                 /* Return value is # of bytes that we wrote to the buffer. */
9473                 if (temp == NULL) {
9474                     ret = get_errno(-1);
9475                 } else {
9476                     /* Don't worry about sign mismatch as earlier mapping
9477                      * logic would have thrown a bad address error. */
9478                     ret = MIN(strlen(real), arg3);
9479                     /* We cannot NUL terminate the string. */
9480                     memcpy(p2, real, ret);
9481                 }
9482             } else {
9483                 ret = get_errno(readlink(path(p), p2, arg3));
9484             }
9485             unlock_user(p2, arg2, ret);
9486             unlock_user(p, arg1, 0);
9487         }
9488         return ret;
9489 #endif
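    /*
     * Guest-visible effect of the is_proc_myself() special case above
     * (illustrative sketch): readlink("/proc/self/exe", ...) reports the path
     * of the emulated binary rather than the QEMU executable, with normal
     * readlink semantics of a possibly truncated, non-NUL-terminated result.
     *
     *     char buf[PATH_MAX];
     *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
     *     // on success buf holds up to n bytes of the guest binary's path,
     *     // with no terminating NUL added by the syscall itself
     */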
9490 #if defined(TARGET_NR_readlinkat)
9491     case TARGET_NR_readlinkat:
9492         {
9493             void *p2;
9494             p  = lock_user_string(arg2);
9495             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9496             if (!p || !p2) {
9497                 ret = -TARGET_EFAULT;
9498             } else if (is_proc_myself((const char *)p, "exe")) {
9499                 char real[PATH_MAX], *temp;
9500                 temp = realpath(exec_path, real);
9501             ret = temp == NULL ? get_errno(-1) : strlen(real);
9502                 snprintf((char *)p2, arg4, "%s", real);
9503             } else {
9504                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9505             }
9506             unlock_user(p2, arg3, ret);
9507             unlock_user(p, arg2, 0);
9508         }
9509         return ret;
9510 #endif
9511 #ifdef TARGET_NR_swapon
9512     case TARGET_NR_swapon:
9513         if (!(p = lock_user_string(arg1)))
9514             return -TARGET_EFAULT;
9515         ret = get_errno(swapon(p, arg2));
9516         unlock_user(p, arg1, 0);
9517         return ret;
9518 #endif
9519     case TARGET_NR_reboot:
9520         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9521            /* arg4 is only used with RESTART2 and must be ignored otherwise */
9522            p = lock_user_string(arg4);
9523            if (!p) {
9524                return -TARGET_EFAULT;
9525            }
9526            ret = get_errno(reboot(arg1, arg2, arg3, p));
9527            unlock_user(p, arg4, 0);
9528         } else {
9529            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9530         }
9531         return ret;
9532 #ifdef TARGET_NR_mmap
9533     case TARGET_NR_mmap:
9534 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9535     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9536     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9537     || defined(TARGET_S390X)
9538         {
9539             abi_ulong *v;
9540             abi_ulong v1, v2, v3, v4, v5, v6;
9541             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9542                 return -TARGET_EFAULT;
9543             v1 = tswapal(v[0]);
9544             v2 = tswapal(v[1]);
9545             v3 = tswapal(v[2]);
9546             v4 = tswapal(v[3]);
9547             v5 = tswapal(v[4]);
9548             v6 = tswapal(v[5]);
9549             unlock_user(v, arg1, 0);
9550             ret = get_errno(target_mmap(v1, v2, v3,
9551                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9552                                         v5, v6));
9553         }
9554 #else
9555         /* mmap pointers are always untagged */
9556         ret = get_errno(target_mmap(arg1, arg2, arg3,
9557                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9558                                     arg5,
9559                                     arg6));
9560 #endif
9561         return ret;
9562 #endif
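    /*
     * On the targets listed above, the old mmap syscall takes a single guest
     * pointer to a block of six abi_ulongs instead of six register arguments.
     * Sketch of the layout being unpacked (field names illustrative, after
     * the historical mmap_arg_struct):
     *
     *     struct old_mmap_args {
     *         abi_ulong addr, len, prot, flags, fd, offset;
     *     };
     *
     * v1..v6 above are these fields after byte-swapping with tswapal().
     */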
9563 #ifdef TARGET_NR_mmap2
9564     case TARGET_NR_mmap2:
9565 #ifndef MMAP_SHIFT
9566 #define MMAP_SHIFT 12
9567 #endif
9568         ret = target_mmap(arg1, arg2, arg3,
9569                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9570                           arg5, arg6 << MMAP_SHIFT);
9571         return get_errno(ret);
9572 #endif
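    /*
     * mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes (4096 by
     * default), so e.g. a guest offset argument of 3 means byte offset
     * 3 << 12 == 12288; the shift above converts it back to bytes before
     * calling target_mmap().
     */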
9573     case TARGET_NR_munmap:
9574         arg1 = cpu_untagged_addr(cpu, arg1);
9575         return get_errno(target_munmap(arg1, arg2));
9576     case TARGET_NR_mprotect:
9577         arg1 = cpu_untagged_addr(cpu, arg1);
9578         {
9579             TaskState *ts = cpu->opaque;
9580             /* Special hack to detect libc making the stack executable.  */
9581             if ((arg3 & PROT_GROWSDOWN)
9582                 && arg1 >= ts->info->stack_limit
9583                 && arg1 <= ts->info->start_stack) {
9584                 arg3 &= ~PROT_GROWSDOWN;
9585                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9586                 arg1 = ts->info->stack_limit;
9587             }
9588         }
9589         return get_errno(target_mprotect(arg1, arg2, arg3));
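    /*
     * Worked example of the PROT_GROWSDOWN hack above (illustrative
     * addresses): with stack_limit == 0x70000000 and start_stack ==
     * 0x80000000, a libc call of
     *
     *     mprotect(0x7ffff000, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC |
     *              PROT_GROWSDOWN);
     *
     * has PROT_GROWSDOWN stripped and the range widened to start at
     * stack_limit: arg1 becomes 0x70000000 and arg2 becomes
     * 0x1000 + 0x7ffff000 - 0x70000000 == 0x10000000, so the whole emulated
     * stack receives the new protection.
     */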
9590 #ifdef TARGET_NR_mremap
9591     case TARGET_NR_mremap:
9592         arg1 = cpu_untagged_addr(cpu, arg1);
9593         /* mremap new_addr (arg5) is always untagged */
9594         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9595 #endif
9596         /* ??? msync/mlock/munlock are broken for softmmu.  */
9597 #ifdef TARGET_NR_msync
9598     case TARGET_NR_msync:
9599         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9600 #endif
9601 #ifdef TARGET_NR_mlock
9602     case TARGET_NR_mlock:
9603         return get_errno(mlock(g2h(cpu, arg1), arg2));
9604 #endif
9605 #ifdef TARGET_NR_munlock
9606     case TARGET_NR_munlock:
9607         return get_errno(munlock(g2h(cpu, arg1), arg2));
9608 #endif
9609 #ifdef TARGET_NR_mlockall
9610     case TARGET_NR_mlockall:
9611         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9612 #endif
9613 #ifdef TARGET_NR_munlockall
9614     case TARGET_NR_munlockall:
9615         return get_errno(munlockall());
9616 #endif
9617 #ifdef TARGET_NR_truncate
9618     case TARGET_NR_truncate:
9619         if (!(p = lock_user_string(arg1)))
9620             return -TARGET_EFAULT;
9621         ret = get_errno(truncate(p, arg2));
9622         unlock_user(p, arg1, 0);
9623         return ret;
9624 #endif
9625 #ifdef TARGET_NR_ftruncate
9626     case TARGET_NR_ftruncate:
9627         return get_errno(ftruncate(arg1, arg2));
9628 #endif
9629     case TARGET_NR_fchmod:
9630         return get_errno(fchmod(arg1, arg2));
9631 #if defined(TARGET_NR_fchmodat)
9632     case TARGET_NR_fchmodat:
9633         if (!(p = lock_user_string(arg2)))
9634             return -TARGET_EFAULT;
9635         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9636         unlock_user(p, arg2, 0);
9637         return ret;
9638 #endif
9639     case TARGET_NR_getpriority:
9640         /* Note that negative values are valid for getpriority, so we must
9641            differentiate based on errno settings.  */
9642         errno = 0;
9643         ret = getpriority(arg1, arg2);
9644         if (ret == -1 && errno != 0) {
9645             return -host_to_target_errno(errno);
9646         }
9647 #ifdef TARGET_ALPHA
9648         /* Return value is the unbiased priority.  Signal no error.  */
9649         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9650 #else
9651         /* Return value is a biased priority to avoid negative numbers.  */
9652         ret = 20 - ret;
9653 #endif
9654         return ret;
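    /*
     * Example of the bias applied above on non-Alpha targets: the host libc
     * getpriority() returns the raw nice value, e.g. -5 for a process reniced
     * to -5, and the handler returns 20 - (-5) == 25, matching the kernel
     * syscall ABI so the guest libc can convert it back to -5.
     */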
9655     case TARGET_NR_setpriority:
9656         return get_errno(setpriority(arg1, arg2, arg3));
9657 #ifdef TARGET_NR_statfs
9658     case TARGET_NR_statfs:
9659         if (!(p = lock_user_string(arg1))) {
9660             return -TARGET_EFAULT;
9661         }
9662         ret = get_errno(statfs(path(p), &stfs));
9663         unlock_user(p, arg1, 0);
9664     convert_statfs:
9665         if (!is_error(ret)) {
9666             struct target_statfs *target_stfs;
9667 
9668             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9669                 return -TARGET_EFAULT;
9670             __put_user(stfs.f_type, &target_stfs->f_type);
9671             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9672             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9673             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9674             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9675             __put_user(stfs.f_files, &target_stfs->f_files);
9676             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9677             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9678             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9679             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9680             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9681 #ifdef _STATFS_F_FLAGS
9682             __put_user(stfs.f_flags, &target_stfs->f_flags);
9683 #else
9684             __put_user(0, &target_stfs->f_flags);
9685 #endif
9686             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9687             unlock_user_struct(target_stfs, arg2, 1);
9688         }
9689         return ret;
9690 #endif
9691 #ifdef TARGET_NR_fstatfs
9692     case TARGET_NR_fstatfs:
9693         ret = get_errno(fstatfs(arg1, &stfs));
9694         goto convert_statfs;
9695 #endif
9696 #ifdef TARGET_NR_statfs64
9697     case TARGET_NR_statfs64:
9698         if (!(p = lock_user_string(arg1))) {
9699             return -TARGET_EFAULT;
9700         }
9701         ret = get_errno(statfs(path(p), &stfs));
9702         unlock_user(p, arg1, 0);
9703     convert_statfs64:
9704         if (!is_error(ret)) {
9705             struct target_statfs64 *target_stfs;
9706 
9707             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9708                 return -TARGET_EFAULT;
9709             __put_user(stfs.f_type, &target_stfs->f_type);
9710             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9711             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9712             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9713             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9714             __put_user(stfs.f_files, &target_stfs->f_files);
9715             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9716             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9717             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9718             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9719             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9720 #ifdef _STATFS_F_FLAGS
9721             __put_user(stfs.f_flags, &target_stfs->f_flags);
9722 #else
9723             __put_user(0, &target_stfs->f_flags);
9724 #endif
9725             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9726             unlock_user_struct(target_stfs, arg3, 1);
9727         }
9728         return ret;
9729     case TARGET_NR_fstatfs64:
9730         ret = get_errno(fstatfs(arg1, &stfs));
9731         goto convert_statfs64;
9732 #endif
9733 #ifdef TARGET_NR_socketcall
9734     case TARGET_NR_socketcall:
9735         return do_socketcall(arg1, arg2);
9736 #endif
9737 #ifdef TARGET_NR_accept
9738     case TARGET_NR_accept:
9739         return do_accept4(arg1, arg2, arg3, 0);
9740 #endif
9741 #ifdef TARGET_NR_accept4
9742     case TARGET_NR_accept4:
9743         return do_accept4(arg1, arg2, arg3, arg4);
9744 #endif
9745 #ifdef TARGET_NR_bind
9746     case TARGET_NR_bind:
9747         return do_bind(arg1, arg2, arg3);
9748 #endif
9749 #ifdef TARGET_NR_connect
9750     case TARGET_NR_connect:
9751         return do_connect(arg1, arg2, arg3);
9752 #endif
9753 #ifdef TARGET_NR_getpeername
9754     case TARGET_NR_getpeername:
9755         return do_getpeername(arg1, arg2, arg3);
9756 #endif
9757 #ifdef TARGET_NR_getsockname
9758     case TARGET_NR_getsockname:
9759         return do_getsockname(arg1, arg2, arg3);
9760 #endif
9761 #ifdef TARGET_NR_getsockopt
9762     case TARGET_NR_getsockopt:
9763         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9764 #endif
9765 #ifdef TARGET_NR_listen
9766     case TARGET_NR_listen:
9767         return get_errno(listen(arg1, arg2));
9768 #endif
9769 #ifdef TARGET_NR_recv
9770     case TARGET_NR_recv:
9771         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9772 #endif
9773 #ifdef TARGET_NR_recvfrom
9774     case TARGET_NR_recvfrom:
9775         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9776 #endif
9777 #ifdef TARGET_NR_recvmsg
9778     case TARGET_NR_recvmsg:
9779         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9780 #endif
9781 #ifdef TARGET_NR_send
9782     case TARGET_NR_send:
9783         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9784 #endif
9785 #ifdef TARGET_NR_sendmsg
9786     case TARGET_NR_sendmsg:
9787         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9788 #endif
9789 #ifdef TARGET_NR_sendmmsg
9790     case TARGET_NR_sendmmsg:
9791         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9792 #endif
9793 #ifdef TARGET_NR_recvmmsg
9794     case TARGET_NR_recvmmsg:
9795         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9796 #endif
9797 #ifdef TARGET_NR_sendto
9798     case TARGET_NR_sendto:
9799         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9800 #endif
9801 #ifdef TARGET_NR_shutdown
9802     case TARGET_NR_shutdown:
9803         return get_errno(shutdown(arg1, arg2));
9804 #endif
9805 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9806     case TARGET_NR_getrandom:
9807         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9808         if (!p) {
9809             return -TARGET_EFAULT;
9810         }
9811         ret = get_errno(getrandom(p, arg2, arg3));
9812         unlock_user(p, arg1, ret);
9813         return ret;
9814 #endif
9815 #ifdef TARGET_NR_socket
9816     case TARGET_NR_socket:
9817         return do_socket(arg1, arg2, arg3);
9818 #endif
9819 #ifdef TARGET_NR_socketpair
9820     case TARGET_NR_socketpair:
9821         return do_socketpair(arg1, arg2, arg3, arg4);
9822 #endif
9823 #ifdef TARGET_NR_setsockopt
9824     case TARGET_NR_setsockopt:
9825         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9826 #endif
9827 #if defined(TARGET_NR_syslog)
9828     case TARGET_NR_syslog:
9829         {
9830             int len = arg2;
9831 
9832             switch (arg1) {
9833             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9834             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9835             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9836             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9837             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9838             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9839             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9840             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9841                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9842             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9843             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9844             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9845                 {
9846                     if (len < 0) {
9847                         return -TARGET_EINVAL;
9848                     }
9849                     if (len == 0) {
9850                         return 0;
9851                     }
9852                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9853                     if (!p) {
9854                         return -TARGET_EFAULT;
9855                     }
9856                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9857                     unlock_user(p, arg2, arg3);
9858                 }
9859                 return ret;
9860             default:
9861                 return -TARGET_EINVAL;
9862             }
9863         }
9864         break;
9865 #endif
9866     case TARGET_NR_setitimer:
9867         {
9868             struct itimerval value, ovalue, *pvalue;
9869 
9870             if (arg2) {
9871                 pvalue = &value;
9872                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9873                     || copy_from_user_timeval(&pvalue->it_value,
9874                                               arg2 + sizeof(struct target_timeval)))
9875                     return -TARGET_EFAULT;
9876             } else {
9877                 pvalue = NULL;
9878             }
9879             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9880             if (!is_error(ret) && arg3) {
9881                 if (copy_to_user_timeval(arg3,
9882                                          &ovalue.it_interval)
9883                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9884                                             &ovalue.it_value))
9885                     return -TARGET_EFAULT;
9886             }
9887         }
9888         return ret;
9889     case TARGET_NR_getitimer:
9890         {
9891             struct itimerval value;
9892 
9893             ret = get_errno(getitimer(arg1, &value));
9894             if (!is_error(ret) && arg2) {
9895                 if (copy_to_user_timeval(arg2,
9896                                          &value.it_interval)
9897                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9898                                             &value.it_value))
9899                     return -TARGET_EFAULT;
9900             }
9901         }
9902         return ret;
9903 #ifdef TARGET_NR_stat
9904     case TARGET_NR_stat:
9905         if (!(p = lock_user_string(arg1))) {
9906             return -TARGET_EFAULT;
9907         }
9908         ret = get_errno(stat(path(p), &st));
9909         unlock_user(p, arg1, 0);
9910         goto do_stat;
9911 #endif
9912 #ifdef TARGET_NR_lstat
9913     case TARGET_NR_lstat:
9914         if (!(p = lock_user_string(arg1))) {
9915             return -TARGET_EFAULT;
9916         }
9917         ret = get_errno(lstat(path(p), &st));
9918         unlock_user(p, arg1, 0);
9919         goto do_stat;
9920 #endif
9921 #ifdef TARGET_NR_fstat
9922     case TARGET_NR_fstat:
9923         {
9924             ret = get_errno(fstat(arg1, &st));
9925 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9926         do_stat:
9927 #endif
9928             if (!is_error(ret)) {
9929                 struct target_stat *target_st;
9930 
9931                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9932                     return -TARGET_EFAULT;
9933                 memset(target_st, 0, sizeof(*target_st));
9934                 __put_user(st.st_dev, &target_st->st_dev);
9935                 __put_user(st.st_ino, &target_st->st_ino);
9936                 __put_user(st.st_mode, &target_st->st_mode);
9937                 __put_user(st.st_uid, &target_st->st_uid);
9938                 __put_user(st.st_gid, &target_st->st_gid);
9939                 __put_user(st.st_nlink, &target_st->st_nlink);
9940                 __put_user(st.st_rdev, &target_st->st_rdev);
9941                 __put_user(st.st_size, &target_st->st_size);
9942                 __put_user(st.st_blksize, &target_st->st_blksize);
9943                 __put_user(st.st_blocks, &target_st->st_blocks);
9944                 __put_user(st.st_atime, &target_st->target_st_atime);
9945                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9946                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9947 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9948                 __put_user(st.st_atim.tv_nsec,
9949                            &target_st->target_st_atime_nsec);
9950                 __put_user(st.st_mtim.tv_nsec,
9951                            &target_st->target_st_mtime_nsec);
9952                 __put_user(st.st_ctim.tv_nsec,
9953                            &target_st->target_st_ctime_nsec);
9954 #endif
9955                 unlock_user_struct(target_st, arg2, 1);
9956             }
9957         }
9958         return ret;
9959 #endif
9960     case TARGET_NR_vhangup:
9961         return get_errno(vhangup());
9962 #ifdef TARGET_NR_syscall
9963     case TARGET_NR_syscall:
9964         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9965                           arg6, arg7, arg8, 0);
9966 #endif
9967 #if defined(TARGET_NR_wait4)
9968     case TARGET_NR_wait4:
9969         {
9970             int status;
9971             abi_long status_ptr = arg2;
9972             struct rusage rusage, *rusage_ptr;
9973             abi_ulong target_rusage = arg4;
9974             abi_long rusage_err;
9975             if (target_rusage)
9976                 rusage_ptr = &rusage;
9977             else
9978                 rusage_ptr = NULL;
9979             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9980             if (!is_error(ret)) {
9981                 if (status_ptr && ret) {
9982                     status = host_to_target_waitstatus(status);
9983                     if (put_user_s32(status, status_ptr))
9984                         return -TARGET_EFAULT;
9985                 }
9986                 if (target_rusage) {
9987                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9988                     if (rusage_err) {
9989                         ret = rusage_err;
9990                     }
9991                 }
9992             }
9993         }
9994         return ret;
9995 #endif
9996 #ifdef TARGET_NR_swapoff
9997     case TARGET_NR_swapoff:
9998         if (!(p = lock_user_string(arg1)))
9999             return -TARGET_EFAULT;
10000         ret = get_errno(swapoff(p));
10001         unlock_user(p, arg1, 0);
10002         return ret;
10003 #endif
10004     case TARGET_NR_sysinfo:
10005         {
10006             struct target_sysinfo *target_value;
10007             struct sysinfo value;
10008             ret = get_errno(sysinfo(&value));
10009             if (!is_error(ret) && arg1)
10010             {
10011                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10012                     return -TARGET_EFAULT;
10013                 __put_user(value.uptime, &target_value->uptime);
10014                 __put_user(value.loads[0], &target_value->loads[0]);
10015                 __put_user(value.loads[1], &target_value->loads[1]);
10016                 __put_user(value.loads[2], &target_value->loads[2]);
10017                 __put_user(value.totalram, &target_value->totalram);
10018                 __put_user(value.freeram, &target_value->freeram);
10019                 __put_user(value.sharedram, &target_value->sharedram);
10020                 __put_user(value.bufferram, &target_value->bufferram);
10021                 __put_user(value.totalswap, &target_value->totalswap);
10022                 __put_user(value.freeswap, &target_value->freeswap);
10023                 __put_user(value.procs, &target_value->procs);
10024                 __put_user(value.totalhigh, &target_value->totalhigh);
10025                 __put_user(value.freehigh, &target_value->freehigh);
10026                 __put_user(value.mem_unit, &target_value->mem_unit);
10027                 unlock_user_struct(target_value, arg1, 1);
10028             }
10029         }
10030         return ret;
10031 #ifdef TARGET_NR_ipc
10032     case TARGET_NR_ipc:
10033         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10034 #endif
10035 #ifdef TARGET_NR_semget
10036     case TARGET_NR_semget:
10037         return get_errno(semget(arg1, arg2, arg3));
10038 #endif
10039 #ifdef TARGET_NR_semop
10040     case TARGET_NR_semop:
10041         return do_semtimedop(arg1, arg2, arg3, 0, false);
10042 #endif
10043 #ifdef TARGET_NR_semtimedop
10044     case TARGET_NR_semtimedop:
10045         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10046 #endif
10047 #ifdef TARGET_NR_semtimedop_time64
10048     case TARGET_NR_semtimedop_time64:
10049         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10050 #endif
10051 #ifdef TARGET_NR_semctl
10052     case TARGET_NR_semctl:
10053         return do_semctl(arg1, arg2, arg3, arg4);
10054 #endif
10055 #ifdef TARGET_NR_msgctl
10056     case TARGET_NR_msgctl:
10057         return do_msgctl(arg1, arg2, arg3);
10058 #endif
10059 #ifdef TARGET_NR_msgget
10060     case TARGET_NR_msgget:
10061         return get_errno(msgget(arg1, arg2));
10062 #endif
10063 #ifdef TARGET_NR_msgrcv
10064     case TARGET_NR_msgrcv:
10065         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10066 #endif
10067 #ifdef TARGET_NR_msgsnd
10068     case TARGET_NR_msgsnd:
10069         return do_msgsnd(arg1, arg2, arg3, arg4);
10070 #endif
10071 #ifdef TARGET_NR_shmget
10072     case TARGET_NR_shmget:
10073         return get_errno(shmget(arg1, arg2, arg3));
10074 #endif
10075 #ifdef TARGET_NR_shmctl
10076     case TARGET_NR_shmctl:
10077         return do_shmctl(arg1, arg2, arg3);
10078 #endif
10079 #ifdef TARGET_NR_shmat
10080     case TARGET_NR_shmat:
10081         return do_shmat(cpu_env, arg1, arg2, arg3);
10082 #endif
10083 #ifdef TARGET_NR_shmdt
10084     case TARGET_NR_shmdt:
10085         return do_shmdt(arg1);
10086 #endif
10087     case TARGET_NR_fsync:
10088         return get_errno(fsync(arg1));
10089     case TARGET_NR_clone:
10090         /* Linux manages to have three different orderings for its
10091          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10092          * match the kernel's CONFIG_CLONE_* settings.
10093          * Microblaze is further special in that it uses a sixth
10094          * implicit argument to clone for the TLS pointer.
10095          */
10096 #if defined(TARGET_MICROBLAZE)
10097         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10098 #elif defined(TARGET_CLONE_BACKWARDS)
10099         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10100 #elif defined(TARGET_CLONE_BACKWARDS2)
10101         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10102 #else
10103         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10104 #endif
10105         return ret;
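    /*
     * Summary of the guest argument orderings dispatched above (do_fork() is
     * always called as (env, flags, newsp, parent_tidptr, newtls,
     * child_tidptr)):
     *
     *     default:     flags, newsp, parent_tidptr, child_tidptr, tls
     *     BACKWARDS:   flags, newsp, parent_tidptr, tls,          child_tidptr
     *     BACKWARDS2:  newsp, flags, parent_tidptr, child_tidptr, tls
     *     microblaze:  flags, newsp, <unused>, parent_tidptr, child_tidptr, tls
     */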
10106 #ifdef __NR_exit_group
10107         /* new thread calls */
10108     case TARGET_NR_exit_group:
10109         preexit_cleanup(cpu_env, arg1);
10110         return get_errno(exit_group(arg1));
10111 #endif
10112     case TARGET_NR_setdomainname:
10113         if (!(p = lock_user_string(arg1)))
10114             return -TARGET_EFAULT;
10115         ret = get_errno(setdomainname(p, arg2));
10116         unlock_user(p, arg1, 0);
10117         return ret;
10118     case TARGET_NR_uname:
10119         /* no need to transcode because we use the linux syscall */
10120         {
10121             struct new_utsname * buf;
10122 
10123             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10124                 return -TARGET_EFAULT;
10125             ret = get_errno(sys_uname(buf));
10126             if (!is_error(ret)) {
10127                 /* Overwrite the native machine name with whatever is being
10128                    emulated. */
10129                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10130                           sizeof(buf->machine));
10131                 /* Allow the user to override the reported release.  */
10132                 if (qemu_uname_release && *qemu_uname_release) {
10133                     g_strlcpy(buf->release, qemu_uname_release,
10134                               sizeof(buf->release));
10135                 }
10136             }
10137             unlock_user_struct(buf, arg1, 1);
10138         }
10139         return ret;
10140 #ifdef TARGET_I386
10141     case TARGET_NR_modify_ldt:
10142         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10143 #if !defined(TARGET_X86_64)
10144     case TARGET_NR_vm86:
10145         return do_vm86(cpu_env, arg1, arg2);
10146 #endif
10147 #endif
10148 #if defined(TARGET_NR_adjtimex)
10149     case TARGET_NR_adjtimex:
10150         {
10151             struct timex host_buf;
10152 
10153             if (target_to_host_timex(&host_buf, arg1) != 0) {
10154                 return -TARGET_EFAULT;
10155             }
10156             ret = get_errno(adjtimex(&host_buf));
10157             if (!is_error(ret)) {
10158                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10159                     return -TARGET_EFAULT;
10160                 }
10161             }
10162         }
10163         return ret;
10164 #endif
10165 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10166     case TARGET_NR_clock_adjtime:
10167         {
10168             struct timex htx, *phtx = &htx;
10169 
10170             if (target_to_host_timex(phtx, arg2) != 0) {
10171                 return -TARGET_EFAULT;
10172             }
10173             ret = get_errno(clock_adjtime(arg1, phtx));
10174             if (!is_error(ret) && phtx) {
10175                 if (host_to_target_timex(arg2, phtx) != 0) {
10176                     return -TARGET_EFAULT;
10177                 }
10178             }
10179         }
10180         return ret;
10181 #endif
10182 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10183     case TARGET_NR_clock_adjtime64:
10184         {
10185             struct timex htx;
10186 
10187             if (target_to_host_timex64(&htx, arg2) != 0) {
10188                 return -TARGET_EFAULT;
10189             }
10190             ret = get_errno(clock_adjtime(arg1, &htx));
10191             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10192                     return -TARGET_EFAULT;
10193             }
10194         }
10195         return ret;
10196 #endif
10197     case TARGET_NR_getpgid:
10198         return get_errno(getpgid(arg1));
10199     case TARGET_NR_fchdir:
10200         return get_errno(fchdir(arg1));
10201     case TARGET_NR_personality:
10202         return get_errno(personality(arg1));
10203 #ifdef TARGET_NR__llseek /* Not on alpha */
10204     case TARGET_NR__llseek:
10205         {
10206             int64_t res;
10207 #if !defined(__NR_llseek)
10208             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10209             if (res == -1) {
10210                 ret = get_errno(res);
10211             } else {
10212                 ret = 0;
10213             }
10214 #else
10215             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10216 #endif
10217             if ((ret == 0) && put_user_s64(res, arg4)) {
10218                 return -TARGET_EFAULT;
10219             }
10220         }
10221         return ret;
10222 #endif
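    /*
     * _llseek splits the 64-bit offset across two 32-bit arguments: arg2 is
     * the high half and arg3 the low half, reconstructed above as
     *
     *     offset = ((uint64_t)arg2 << 32) | (abi_ulong)arg3;
     *
     * and the resulting 64-bit file position is stored back through the
     * guest pointer in arg4.
     */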
10223 #ifdef TARGET_NR_getdents
10224     case TARGET_NR_getdents:
10225 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10226 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10227         {
10228             struct target_dirent *target_dirp;
10229             struct linux_dirent *dirp;
10230             abi_long count = arg3;
10231 
10232             dirp = g_try_malloc(count);
10233             if (!dirp) {
10234                 return -TARGET_ENOMEM;
10235             }
10236 
10237             ret = get_errno(sys_getdents(arg1, dirp, count));
10238             if (!is_error(ret)) {
10239                 struct linux_dirent *de;
10240                 struct target_dirent *tde;
10241                 int len = ret;
10242                 int reclen, treclen;
10243                 int count1, tnamelen;
10244 
10245                 count1 = 0;
10246                 de = dirp;
10247                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10248                     return -TARGET_EFAULT;
10249                 tde = target_dirp;
10250                 while (len > 0) {
10251                     reclen = de->d_reclen;
10252                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10253                     assert(tnamelen >= 0);
10254                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10255                     assert(count1 + treclen <= count);
10256                     tde->d_reclen = tswap16(treclen);
10257                     tde->d_ino = tswapal(de->d_ino);
10258                     tde->d_off = tswapal(de->d_off);
10259                     memcpy(tde->d_name, de->d_name, tnamelen);
10260                     de = (struct linux_dirent *)((char *)de + reclen);
10261                     len -= reclen;
10262                     tde = (struct target_dirent *)((char *)tde + treclen);
10263                     count1 += treclen;
10264                 }
10265                 ret = count1;
10266                 unlock_user(target_dirp, arg2, ret);
10267             }
10268             g_free(dirp);
10269         }
10270 #else
10271         {
10272             struct linux_dirent *dirp;
10273             abi_long count = arg3;
10274 
10275             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10276                 return -TARGET_EFAULT;
10277             ret = get_errno(sys_getdents(arg1, dirp, count));
10278             if (!is_error(ret)) {
10279                 struct linux_dirent *de;
10280                 int len = ret;
10281                 int reclen;
10282                 de = dirp;
10283                 while (len > 0) {
10284                     reclen = de->d_reclen;
10285                     if (reclen > len)
10286                         break;
10287                     de->d_reclen = tswap16(reclen);
10288                     tswapls(&de->d_ino);
10289                     tswapls(&de->d_off);
10290                     de = (struct linux_dirent *)((char *)de + reclen);
10291                     len -= reclen;
10292                 }
10293             }
10294             unlock_user(dirp, arg2, ret);
10295         }
10296 #endif
10297 #else
10298         /* Implement getdents in terms of getdents64 */
10299         {
10300             struct linux_dirent64 *dirp;
10301             abi_long count = arg3;
10302 
10303             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10304             if (!dirp) {
10305                 return -TARGET_EFAULT;
10306             }
10307             ret = get_errno(sys_getdents64(arg1, dirp, count));
10308             if (!is_error(ret)) {
10309                 /* Convert the dirent64 structs to target dirent.  We do this
10310                  * in-place, since we can guarantee that a target_dirent is no
10311                  * larger than a dirent64; however this means we have to be
10312                  * careful to read everything before writing in the new format.
10313                  */
10314                 struct linux_dirent64 *de;
10315                 struct target_dirent *tde;
10316                 int len = ret;
10317                 int tlen = 0;
10318 
10319                 de = dirp;
10320                 tde = (struct target_dirent *)dirp;
10321                 while (len > 0) {
10322                     int namelen, treclen;
10323                     int reclen = de->d_reclen;
10324                     uint64_t ino = de->d_ino;
10325                     int64_t off = de->d_off;
10326                     uint8_t type = de->d_type;
10327 
10328                     namelen = strlen(de->d_name);
10329                     treclen = offsetof(struct target_dirent, d_name)
10330                         + namelen + 2;
10331                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10332 
10333                     memmove(tde->d_name, de->d_name, namelen + 1);
10334                     tde->d_ino = tswapal(ino);
10335                     tde->d_off = tswapal(off);
10336                     tde->d_reclen = tswap16(treclen);
10337                     /* The target_dirent type is in what was formerly a padding
10338                      * byte at the end of the structure:
10339                      */
10340                     *(((char *)tde) + treclen - 1) = type;
10341 
10342                     de = (struct linux_dirent64 *)((char *)de + reclen);
10343                     tde = (struct target_dirent *)((char *)tde + treclen);
10344                     len -= reclen;
10345                     tlen += treclen;
10346                 }
10347                 ret = tlen;
10348             }
10349             unlock_user(dirp, arg2, ret);
10350         }
10351 #endif
10352         return ret;
10353 #endif /* TARGET_NR_getdents */
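    /*
     * Layout sketch for the getdents64-based conversion above (field order as
     * in the kernel ABI, sizes illustrative). A host linux_dirent64 record
     *
     *     u64 d_ino; s64 d_off; u16 d_reclen; u8 d_type; char d_name[];
     *
     * is rewritten in place as a target_dirent record
     *
     *     abi_ulong d_ino; abi_ulong d_off; u16 d_reclen; char d_name[];
     *
     * with d_type stored in the final byte of the record and d_reclen
     * recomputed for the (usually smaller) target layout, so the guest sees a
     * densely packed buffer whose total length is returned.
     */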
10354 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10355     case TARGET_NR_getdents64:
10356         {
10357             struct linux_dirent64 *dirp;
10358             abi_long count = arg3;
10359             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10360                 return -TARGET_EFAULT;
10361             ret = get_errno(sys_getdents64(arg1, dirp, count));
10362             if (!is_error(ret)) {
10363                 struct linux_dirent64 *de;
10364                 int len = ret;
10365                 int reclen;
10366                 de = dirp;
10367                 while (len > 0) {
10368                     reclen = de->d_reclen;
10369                     if (reclen > len)
10370                         break;
10371                     de->d_reclen = tswap16(reclen);
10372                     tswap64s((uint64_t *)&de->d_ino);
10373                     tswap64s((uint64_t *)&de->d_off);
10374                     de = (struct linux_dirent64 *)((char *)de + reclen);
10375                     len -= reclen;
10376                 }
10377             }
10378             unlock_user(dirp, arg2, ret);
10379         }
10380         return ret;
10381 #endif /* TARGET_NR_getdents64 */
10382 #if defined(TARGET_NR__newselect)
10383     case TARGET_NR__newselect:
10384         return do_select(arg1, arg2, arg3, arg4, arg5);
10385 #endif
10386 #ifdef TARGET_NR_poll
10387     case TARGET_NR_poll:
10388         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10389 #endif
10390 #ifdef TARGET_NR_ppoll
10391     case TARGET_NR_ppoll:
10392         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10393 #endif
10394 #ifdef TARGET_NR_ppoll_time64
10395     case TARGET_NR_ppoll_time64:
10396         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10397 #endif
10398     case TARGET_NR_flock:
10399         /* NOTE: the flock constant seems to be the same for every
10400            Linux platform */
10401         return get_errno(safe_flock(arg1, arg2));
10402     case TARGET_NR_readv:
10403         {
10404             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10405             if (vec != NULL) {
10406                 ret = get_errno(safe_readv(arg1, vec, arg3));
10407                 unlock_iovec(vec, arg2, arg3, 1);
10408             } else {
10409                 ret = -host_to_target_errno(errno);
10410             }
10411         }
10412         return ret;
10413     case TARGET_NR_writev:
10414         {
10415             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10416             if (vec != NULL) {
10417                 ret = get_errno(safe_writev(arg1, vec, arg3));
10418                 unlock_iovec(vec, arg2, arg3, 0);
10419             } else {
10420                 ret = -host_to_target_errno(errno);
10421             }
10422         }
10423         return ret;
10424 #if defined(TARGET_NR_preadv)
10425     case TARGET_NR_preadv:
10426         {
10427             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10428             if (vec != NULL) {
10429                 unsigned long low, high;
10430 
10431                 target_to_host_low_high(arg4, arg5, &low, &high);
10432                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10433                 unlock_iovec(vec, arg2, arg3, 1);
10434             } else {
10435                 ret = -host_to_target_errno(errno);
10436             }
10437         }
10438         return ret;
10439 #endif
10440 #if defined(TARGET_NR_pwritev)
10441     case TARGET_NR_pwritev:
10442         {
10443             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10444             if (vec != NULL) {
10445                 unsigned long low, high;
10446 
10447                 target_to_host_low_high(arg4, arg5, &low, &high);
10448                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10449                 unlock_iovec(vec, arg2, arg3, 0);
10450             } else {
10451                 ret = -host_to_target_errno(errno);
10452             }
10453         }
10454         return ret;
10455 #endif
10456     case TARGET_NR_getsid:
10457         return get_errno(getsid(arg1));
10458 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10459     case TARGET_NR_fdatasync:
10460         return get_errno(fdatasync(arg1));
10461 #endif
10462     case TARGET_NR_sched_getaffinity:
10463         {
10464             unsigned int mask_size;
10465             unsigned long *mask;
10466 
10467             /*
10468              * sched_getaffinity needs multiples of ulong, so need to take
10469              * care of mismatches between target ulong and host ulong sizes.
10470              */
10471             if (arg2 & (sizeof(abi_ulong) - 1)) {
10472                 return -TARGET_EINVAL;
10473             }
10474             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10475 
10476             mask = alloca(mask_size);
10477             memset(mask, 0, mask_size);
10478             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10479 
10480             if (!is_error(ret)) {
10481                 if (ret > arg2) {
10482                     /* More data was returned than the caller's buffer can hold.
10483                      * This only happens if sizeof(abi_long) < sizeof(long)
10484                      * and the caller passed us a buffer holding an odd number
10485                      * of abi_longs. If the host kernel is actually using the
10486                      * extra 4 bytes then fail with EINVAL; otherwise we can just
10487                      * ignore them and only copy the interesting part.
10488                      */
10489                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10490                     if (numcpus > arg2 * 8) {
10491                         return -TARGET_EINVAL;
10492                     }
10493                     ret = arg2;
10494                 }
10495 
10496                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10497                     return -TARGET_EFAULT;
10498                 }
10499             }
10500         }
10501         return ret;
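    /*
     * Example of the size handling above (illustrative): a 32-bit guest on a
     * 64-bit host calling sched_getaffinity(pid, 12, mask) passes a length
     * that is a multiple of sizeof(abi_ulong) == 4 but not of the host
     * sizeof(unsigned long) == 8, so mask_size is rounded up to 16 for the
     * host syscall and at most the first 12 bytes are copied back, provided
     * the extra host bytes are not needed for any configured CPU.
     */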
10502     case TARGET_NR_sched_setaffinity:
10503         {
10504             unsigned int mask_size;
10505             unsigned long *mask;
10506 
10507             /*
10508              * sched_setaffinity needs multiples of ulong, so need to take
10509              * care of mismatches between target ulong and host ulong sizes.
10510              */
10511             if (arg2 & (sizeof(abi_ulong) - 1)) {
10512                 return -TARGET_EINVAL;
10513             }
10514             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10515             mask = alloca(mask_size);
10516 
10517             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10518             if (ret) {
10519                 return ret;
10520             }
10521 
10522             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10523         }
10524     case TARGET_NR_getcpu:
10525         {
10526             unsigned cpu, node;
10527             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10528                                        arg2 ? &node : NULL,
10529                                        NULL));
10530             if (is_error(ret)) {
10531                 return ret;
10532             }
10533             if (arg1 && put_user_u32(cpu, arg1)) {
10534                 return -TARGET_EFAULT;
10535             }
10536             if (arg2 && put_user_u32(node, arg2)) {
10537                 return -TARGET_EFAULT;
10538             }
10539         }
10540         return ret;
10541     case TARGET_NR_sched_setparam:
10542         {
10543             struct sched_param *target_schp;
10544             struct sched_param schp;
10545 
10546             if (arg2 == 0) {
10547                 return -TARGET_EINVAL;
10548             }
10549             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10550                 return -TARGET_EFAULT;
10551             schp.sched_priority = tswap32(target_schp->sched_priority);
10552             unlock_user_struct(target_schp, arg2, 0);
10553             return get_errno(sched_setparam(arg1, &schp));
10554         }
10555     case TARGET_NR_sched_getparam:
10556         {
10557             struct sched_param *target_schp;
10558             struct sched_param schp;
10559 
10560             if (arg2 == 0) {
10561                 return -TARGET_EINVAL;
10562             }
10563             ret = get_errno(sched_getparam(arg1, &schp));
10564             if (!is_error(ret)) {
10565                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10566                     return -TARGET_EFAULT;
10567                 target_schp->sched_priority = tswap32(schp.sched_priority);
10568                 unlock_user_struct(target_schp, arg2, 1);
10569             }
10570         }
10571         return ret;
10572     case TARGET_NR_sched_setscheduler:
10573         {
10574             struct sched_param *target_schp;
10575             struct sched_param schp;
10576             if (arg3 == 0) {
10577                 return -TARGET_EINVAL;
10578             }
10579             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10580                 return -TARGET_EFAULT;
10581             schp.sched_priority = tswap32(target_schp->sched_priority);
10582             unlock_user_struct(target_schp, arg3, 0);
10583             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10584         }
10585     case TARGET_NR_sched_getscheduler:
10586         return get_errno(sched_getscheduler(arg1));
10587     case TARGET_NR_sched_yield:
10588         return get_errno(sched_yield());
10589     case TARGET_NR_sched_get_priority_max:
10590         return get_errno(sched_get_priority_max(arg1));
10591     case TARGET_NR_sched_get_priority_min:
10592         return get_errno(sched_get_priority_min(arg1));
10593 #ifdef TARGET_NR_sched_rr_get_interval
10594     case TARGET_NR_sched_rr_get_interval:
10595         {
10596             struct timespec ts;
10597             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10598             if (!is_error(ret)) {
10599                 ret = host_to_target_timespec(arg2, &ts);
10600             }
10601         }
10602         return ret;
10603 #endif
10604 #ifdef TARGET_NR_sched_rr_get_interval_time64
10605     case TARGET_NR_sched_rr_get_interval_time64:
10606         {
10607             struct timespec ts;
10608             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10609             if (!is_error(ret)) {
10610                 ret = host_to_target_timespec64(arg2, &ts);
10611             }
10612         }
10613         return ret;
10614 #endif
10615 #if defined(TARGET_NR_nanosleep)
10616     case TARGET_NR_nanosleep:
10617         {
10618             struct timespec req, rem;
10619             target_to_host_timespec(&req, arg1);
10620             ret = get_errno(safe_nanosleep(&req, &rem));
10621             if (is_error(ret) && arg2) {
10622                 host_to_target_timespec(arg2, &rem);
10623             }
10624         }
10625         return ret;
10626 #endif
10627     case TARGET_NR_prctl:
10628         switch (arg1) {
10629         case PR_GET_PDEATHSIG:
10630         {
10631             int deathsig;
10632             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10633             if (!is_error(ret) && arg2
10634                 && put_user_s32(deathsig, arg2)) {
10635                 return -TARGET_EFAULT;
10636             }
10637             return ret;
10638         }
10639 #ifdef PR_GET_NAME
10640         case PR_GET_NAME:
10641         {
10642             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10643             if (!name) {
10644                 return -TARGET_EFAULT;
10645             }
10646             ret = get_errno(prctl(arg1, (unsigned long)name,
10647                                   arg3, arg4, arg5));
10648             unlock_user(name, arg2, 16);
10649             return ret;
10650         }
10651         case PR_SET_NAME:
10652         {
10653             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10654             if (!name) {
10655                 return -TARGET_EFAULT;
10656             }
10657             ret = get_errno(prctl(arg1, (unsigned long)name,
10658                                   arg3, arg4, arg5));
10659             unlock_user(name, arg2, 0);
10660             return ret;
10661         }
10662 #endif
10663 #ifdef TARGET_MIPS
10664         case TARGET_PR_GET_FP_MODE:
10665         {
10666             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10667             ret = 0;
10668             if (env->CP0_Status & (1 << CP0St_FR)) {
10669                 ret |= TARGET_PR_FP_MODE_FR;
10670             }
10671             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10672                 ret |= TARGET_PR_FP_MODE_FRE;
10673             }
10674             return ret;
10675         }
10676         case TARGET_PR_SET_FP_MODE:
10677         {
10678             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10679             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10680             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10681             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10682             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10683 
10684             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10685                                             TARGET_PR_FP_MODE_FRE;
10686 
10687             /* If nothing to change, return right away, successfully.  */
10688             if (old_fr == new_fr && old_fre == new_fre) {
10689                 return 0;
10690             }
10691             /* Check the value is valid */
10692             if (arg2 & ~known_bits) {
10693                 return -TARGET_EOPNOTSUPP;
10694             }
10695             /* Setting FRE without FR is not supported.  */
10696             if (new_fre && !new_fr) {
10697                 return -TARGET_EOPNOTSUPP;
10698             }
10699             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10700                 /* FR1 is not supported */
10701                 return -TARGET_EOPNOTSUPP;
10702             }
10703             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10704                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10705                 /* cannot set FR=0 */
10706                 return -TARGET_EOPNOTSUPP;
10707             }
10708             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10709                 /* Cannot set FRE=1 */
10710                 return -TARGET_EOPNOTSUPP;
10711             }
10712 
10713             int i;
10714             fpr_t *fpr = env->active_fpu.fpr;
10715             for (i = 0; i < 32 ; i += 2) {
10716                 if (!old_fr && new_fr) {
10717                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10718                 } else if (old_fr && !new_fr) {
10719                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10720                 }
10721             }
10722 
10723             if (new_fr) {
10724                 env->CP0_Status |= (1 << CP0St_FR);
10725                 env->hflags |= MIPS_HFLAG_F64;
10726             } else {
10727                 env->CP0_Status &= ~(1 << CP0St_FR);
10728                 env->hflags &= ~MIPS_HFLAG_F64;
10729             }
10730             if (new_fre) {
10731                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10732                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10733                     env->hflags |= MIPS_HFLAG_FRE;
10734                 }
10735             } else {
10736                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10737                 env->hflags &= ~MIPS_HFLAG_FRE;
10738             }
10739 
10740             return 0;
10741         }
10742 #endif /* MIPS */
10743 #ifdef TARGET_AARCH64
10744         case TARGET_PR_SVE_SET_VL:
10745             /*
10746              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10747              * PR_SVE_VL_INHERIT.  Note the kernel definition
10748              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10749              * even though the current architectural maximum is VQ=16.
10750              */
10751             ret = -TARGET_EINVAL;
10752             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10753                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10754                 CPUARMState *env = cpu_env;
10755                 ARMCPU *cpu = env_archcpu(env);
10756                 uint32_t vq, old_vq;
10757 
10758                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10759                 vq = MAX(arg2 / 16, 1);
10760                 vq = MIN(vq, cpu->sve_max_vq);
10761 
10762                 if (vq < old_vq) {
10763                     aarch64_sve_narrow_vq(env, vq);
10764                 }
10765                 env->vfp.zcr_el[1] = vq - 1;
10766                 arm_rebuild_hflags(env);
10767                 ret = vq * 16;
10768             }
10769             return ret;
10770         case TARGET_PR_SVE_GET_VL:
10771             ret = -TARGET_EINVAL;
10772             {
10773                 ARMCPU *cpu = env_archcpu(cpu_env);
10774                 if (cpu_isar_feature(aa64_sve, cpu)) {
10775                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10776                 }
10777             }
10778             return ret;
10779         case TARGET_PR_PAC_RESET_KEYS:
10780             {
10781                 CPUARMState *env = cpu_env;
10782                 ARMCPU *cpu = env_archcpu(env);
10783 
10784                 if (arg3 || arg4 || arg5) {
10785                     return -TARGET_EINVAL;
10786                 }
10787                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10788                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10789                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10790                                TARGET_PR_PAC_APGAKEY);
10791                     int ret = 0;
10792                     Error *err = NULL;
10793 
10794                     if (arg2 == 0) {
10795                         arg2 = all;
10796                     } else if (arg2 & ~all) {
10797                         return -TARGET_EINVAL;
10798                     }
10799                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10800                         ret |= qemu_guest_getrandom(&env->keys.apia,
10801                                                     sizeof(ARMPACKey), &err);
10802                     }
10803                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10804                         ret |= qemu_guest_getrandom(&env->keys.apib,
10805                                                     sizeof(ARMPACKey), &err);
10806                     }
10807                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10808                         ret |= qemu_guest_getrandom(&env->keys.apda,
10809                                                     sizeof(ARMPACKey), &err);
10810                     }
10811                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10812                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10813                                                     sizeof(ARMPACKey), &err);
10814                     }
10815                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10816                         ret |= qemu_guest_getrandom(&env->keys.apga,
10817                                                     sizeof(ARMPACKey), &err);
10818                     }
10819                     if (ret != 0) {
10820                         /*
10821                          * Some unknown failure in the crypto.  The best
10822                          * we can do is log it and fail the syscall.
10823                          * The real syscall cannot fail this way.
10824                          */
10825                         qemu_log_mask(LOG_UNIMP,
10826                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10827                                       error_get_pretty(err));
10828                         error_free(err);
10829                         return -TARGET_EIO;
10830                     }
10831                     return 0;
10832                 }
10833             }
10834             return -TARGET_EINVAL;
10835         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10836             {
10837                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10838                 CPUARMState *env = cpu_env;
10839                 ARMCPU *cpu = env_archcpu(env);
10840 
10841                 if (cpu_isar_feature(aa64_mte, cpu)) {
10842                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10843                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10844                 }
10845 
10846                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10847                     return -TARGET_EINVAL;
10848                 }
10849                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10850 
10851                 if (cpu_isar_feature(aa64_mte, cpu)) {
10852                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10853                     case TARGET_PR_MTE_TCF_NONE:
10854                     case TARGET_PR_MTE_TCF_SYNC:
10855                     case TARGET_PR_MTE_TCF_ASYNC:
10856                         break;
10857                     default:
10858                         return -EINVAL;
10859                     }
10860 
10861                     /*
10862                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10863                      * Note that the syscall values are consistent with hw.
10864                      */
10865                     env->cp15.sctlr_el[1] =
10866                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10867                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10868 
10869                     /*
10870                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10871                      * Note that the syscall uses an include mask,
10872                      * and hardware uses an exclude mask -- invert.
10873                      */
10874                     env->cp15.gcr_el1 =
10875                         deposit64(env->cp15.gcr_el1, 0, 16,
10876                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10877                     arm_rebuild_hflags(env);
10878                 }
10879                 return 0;
10880             }
10881         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10882             {
10883                 abi_long ret = 0;
10884                 CPUARMState *env = cpu_env;
10885                 ARMCPU *cpu = env_archcpu(env);
10886 
10887                 if (arg2 || arg3 || arg4 || arg5) {
10888                     return -TARGET_EINVAL;
10889                 }
10890                 if (env->tagged_addr_enable) {
10891                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10892                 }
10893                 if (cpu_isar_feature(aa64_mte, cpu)) {
10894                     /* See above. */
10895                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10896                             << TARGET_PR_MTE_TCF_SHIFT);
10897                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10898                                     ~env->cp15.gcr_el1);
10899                 }
10900                 return ret;
10901             }
10902 #endif /* AARCH64 */
10903         case PR_GET_SECCOMP:
10904         case PR_SET_SECCOMP:
10905             /* Disable seccomp to prevent the target disabling syscalls we
10906              * need. */
10907             return -TARGET_EINVAL;
10908         default:
10909             /* Most prctl options have no pointer arguments */
10910             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10911         }
10912         break;
10913 #ifdef TARGET_NR_arch_prctl
10914     case TARGET_NR_arch_prctl:
10915         return do_arch_prctl(cpu_env, arg1, arg2);
10916 #endif
10917 #ifdef TARGET_NR_pread64
10918     case TARGET_NR_pread64:
10919         if (regpairs_aligned(cpu_env, num)) {
10920             arg4 = arg5;
10921             arg5 = arg6;
10922         }
10923         if (arg2 == 0 && arg3 == 0) {
10924             /* Special-case NULL buffer and zero length, which should succeed */
10925             p = 0;
10926         } else {
10927             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10928             if (!p) {
10929                 return -TARGET_EFAULT;
10930             }
10931         }
10932         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10933         unlock_user(p, arg2, ret);
10934         return ret;
10935     case TARGET_NR_pwrite64:
10936         if (regpairs_aligned(cpu_env, num)) {
10937             arg4 = arg5;
10938             arg5 = arg6;
10939         }
10940         if (arg2 == 0 && arg3 == 0) {
10941             /* Special-case NULL buffer and zero length, which should succeed */
10942             p = 0;
10943         } else {
10944             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10945             if (!p) {
10946                 return -TARGET_EFAULT;
10947             }
10948         }
10949         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10950         unlock_user(p, arg2, 0);
10951         return ret;
10952 #endif
10953     case TARGET_NR_getcwd:
10954         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10955             return -TARGET_EFAULT;
10956         ret = get_errno(sys_getcwd1(p, arg2));
10957         unlock_user(p, arg1, ret);
10958         return ret;
10959     case TARGET_NR_capget:
10960     case TARGET_NR_capset:
10961     {
10962         struct target_user_cap_header *target_header;
10963         struct target_user_cap_data *target_data = NULL;
10964         struct __user_cap_header_struct header;
10965         struct __user_cap_data_struct data[2];
10966         struct __user_cap_data_struct *dataptr = NULL;
10967         int i, target_datalen;
10968         int data_items = 1;
10969 
10970         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10971             return -TARGET_EFAULT;
10972         }
10973         header.version = tswap32(target_header->version);
10974         header.pid = tswap32(target_header->pid);
10975 
10976         if (header.version != _LINUX_CAPABILITY_VERSION) {
10977             /* Version 2 and up takes pointer to two user_data structs */
10978             data_items = 2;
10979         }
10980 
10981         target_datalen = sizeof(*target_data) * data_items;
10982 
10983         if (arg2) {
10984             if (num == TARGET_NR_capget) {
10985                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10986             } else {
10987                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10988             }
10989             if (!target_data) {
10990                 unlock_user_struct(target_header, arg1, 0);
10991                 return -TARGET_EFAULT;
10992             }
10993 
10994             if (num == TARGET_NR_capset) {
10995                 for (i = 0; i < data_items; i++) {
10996                     data[i].effective = tswap32(target_data[i].effective);
10997                     data[i].permitted = tswap32(target_data[i].permitted);
10998                     data[i].inheritable = tswap32(target_data[i].inheritable);
10999                 }
11000             }
11001 
11002             dataptr = data;
11003         }
11004 
11005         if (num == TARGET_NR_capget) {
11006             ret = get_errno(capget(&header, dataptr));
11007         } else {
11008             ret = get_errno(capset(&header, dataptr));
11009         }
11010 
11011         /* The kernel always updates version for both capget and capset */
11012         target_header->version = tswap32(header.version);
11013         unlock_user_struct(target_header, arg1, 1);
11014 
11015         if (arg2) {
11016             if (num == TARGET_NR_capget) {
11017                 for (i = 0; i < data_items; i++) {
11018                     target_data[i].effective = tswap32(data[i].effective);
11019                     target_data[i].permitted = tswap32(data[i].permitted);
11020                     target_data[i].inheritable = tswap32(data[i].inheritable);
11021                 }
11022                 unlock_user(target_data, arg2, target_datalen);
11023             } else {
11024                 unlock_user(target_data, arg2, 0);
11025             }
11026         }
11027         return ret;
11028     }
11029     case TARGET_NR_sigaltstack:
11030         return do_sigaltstack(arg1, arg2, cpu_env);
11031 
11032 #ifdef CONFIG_SENDFILE
11033 #ifdef TARGET_NR_sendfile
11034     case TARGET_NR_sendfile:
11035     {
11036         off_t *offp = NULL;
11037         off_t off;
11038         if (arg3) {
11039             ret = get_user_sal(off, arg3);
11040             if (is_error(ret)) {
11041                 return ret;
11042             }
11043             offp = &off;
11044         }
11045         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11046         if (!is_error(ret) && arg3) {
11047             abi_long ret2 = put_user_sal(off, arg3);
11048             if (is_error(ret2)) {
11049                 ret = ret2;
11050             }
11051         }
11052         return ret;
11053     }
11054 #endif
11055 #ifdef TARGET_NR_sendfile64
11056     case TARGET_NR_sendfile64:
11057     {
11058         off_t *offp = NULL;
11059         off_t off;
11060         if (arg3) {
11061             ret = get_user_s64(off, arg3);
11062             if (is_error(ret)) {
11063                 return ret;
11064             }
11065             offp = &off;
11066         }
11067         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11068         if (!is_error(ret) && arg3) {
11069             abi_long ret2 = put_user_s64(off, arg3);
11070             if (is_error(ret2)) {
11071                 ret = ret2;
11072             }
11073         }
11074         return ret;
11075     }
11076 #endif
11077 #endif
11078 #ifdef TARGET_NR_vfork
11079     case TARGET_NR_vfork:
11080         return get_errno(do_fork(cpu_env,
11081                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11082                          0, 0, 0, 0));
11083 #endif
11084 #ifdef TARGET_NR_ugetrlimit
11085     case TARGET_NR_ugetrlimit:
11086     {
11087 	struct rlimit rlim;
11088 	int resource = target_to_host_resource(arg1);
11089 	ret = get_errno(getrlimit(resource, &rlim));
11090 	if (!is_error(ret)) {
11091 	    struct target_rlimit *target_rlim;
11092             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11093                 return -TARGET_EFAULT;
11094 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11095 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11096             unlock_user_struct(target_rlim, arg2, 1);
11097 	}
11098         return ret;
11099     }
11100 #endif
11101 #ifdef TARGET_NR_truncate64
11102     case TARGET_NR_truncate64:
11103         if (!(p = lock_user_string(arg1)))
11104             return -TARGET_EFAULT;
11105 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11106         unlock_user(p, arg1, 0);
11107         return ret;
11108 #endif
11109 #ifdef TARGET_NR_ftruncate64
11110     case TARGET_NR_ftruncate64:
11111         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11112 #endif
11113 #ifdef TARGET_NR_stat64
11114     case TARGET_NR_stat64:
11115         if (!(p = lock_user_string(arg1))) {
11116             return -TARGET_EFAULT;
11117         }
11118         ret = get_errno(stat(path(p), &st));
11119         unlock_user(p, arg1, 0);
11120         if (!is_error(ret))
11121             ret = host_to_target_stat64(cpu_env, arg2, &st);
11122         return ret;
11123 #endif
11124 #ifdef TARGET_NR_lstat64
11125     case TARGET_NR_lstat64:
11126         if (!(p = lock_user_string(arg1))) {
11127             return -TARGET_EFAULT;
11128         }
11129         ret = get_errno(lstat(path(p), &st));
11130         unlock_user(p, arg1, 0);
11131         if (!is_error(ret))
11132             ret = host_to_target_stat64(cpu_env, arg2, &st);
11133         return ret;
11134 #endif
11135 #ifdef TARGET_NR_fstat64
11136     case TARGET_NR_fstat64:
11137         ret = get_errno(fstat(arg1, &st));
11138         if (!is_error(ret))
11139             ret = host_to_target_stat64(cpu_env, arg2, &st);
11140         return ret;
11141 #endif
11142 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11143 #ifdef TARGET_NR_fstatat64
11144     case TARGET_NR_fstatat64:
11145 #endif
11146 #ifdef TARGET_NR_newfstatat
11147     case TARGET_NR_newfstatat:
11148 #endif
11149         if (!(p = lock_user_string(arg2))) {
11150             return -TARGET_EFAULT;
11151         }
11152         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11153         unlock_user(p, arg2, 0);
11154         if (!is_error(ret))
11155             ret = host_to_target_stat64(cpu_env, arg3, &st);
11156         return ret;
11157 #endif
11158 #if defined(TARGET_NR_statx)
11159     case TARGET_NR_statx:
11160         {
11161             struct target_statx *target_stx;
11162             int dirfd = arg1;
11163             int flags = arg3;
11164 
11165             p = lock_user_string(arg2);
11166             if (p == NULL) {
11167                 return -TARGET_EFAULT;
11168             }
11169 #if defined(__NR_statx)
11170             {
11171                 /*
11172                  * It is assumed that struct statx is architecture independent.
11173                  */
11174                 struct target_statx host_stx;
11175                 int mask = arg4;
11176 
11177                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11178                 if (!is_error(ret)) {
11179                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11180                         unlock_user(p, arg2, 0);
11181                         return -TARGET_EFAULT;
11182                     }
11183                 }
11184 
11185                 if (ret != -TARGET_ENOSYS) {
11186                     unlock_user(p, arg2, 0);
11187                     return ret;
11188                 }
11189             }
11190 #endif
11191             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11192             unlock_user(p, arg2, 0);
11193 
11194             if (!is_error(ret)) {
11195                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11196                     return -TARGET_EFAULT;
11197                 }
11198                 memset(target_stx, 0, sizeof(*target_stx));
11199                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11200                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11201                 __put_user(st.st_ino, &target_stx->stx_ino);
11202                 __put_user(st.st_mode, &target_stx->stx_mode);
11203                 __put_user(st.st_uid, &target_stx->stx_uid);
11204                 __put_user(st.st_gid, &target_stx->stx_gid);
11205                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11206                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11207                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11208                 __put_user(st.st_size, &target_stx->stx_size);
11209                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11210                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11211                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11212                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11213                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11214                 unlock_user_struct(target_stx, arg5, 1);
11215             }
11216         }
11217         return ret;
11218 #endif
11219 #ifdef TARGET_NR_lchown
11220     case TARGET_NR_lchown:
11221         if (!(p = lock_user_string(arg1)))
11222             return -TARGET_EFAULT;
11223         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11224         unlock_user(p, arg1, 0);
11225         return ret;
11226 #endif
11227 #ifdef TARGET_NR_getuid
11228     case TARGET_NR_getuid:
11229         return get_errno(high2lowuid(getuid()));
11230 #endif
11231 #ifdef TARGET_NR_getgid
11232     case TARGET_NR_getgid:
11233         return get_errno(high2lowgid(getgid()));
11234 #endif
11235 #ifdef TARGET_NR_geteuid
11236     case TARGET_NR_geteuid:
11237         return get_errno(high2lowuid(geteuid()));
11238 #endif
11239 #ifdef TARGET_NR_getegid
11240     case TARGET_NR_getegid:
11241         return get_errno(high2lowgid(getegid()));
11242 #endif
11243     case TARGET_NR_setreuid:
11244         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11245     case TARGET_NR_setregid:
11246         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11247     case TARGET_NR_getgroups:
11248         {
11249             int gidsetsize = arg1;
11250             target_id *target_grouplist;
11251             gid_t *grouplist;
11252             int i;
11253 
11254             grouplist = alloca(gidsetsize * sizeof(gid_t));
11255             ret = get_errno(getgroups(gidsetsize, grouplist));
11256             if (gidsetsize == 0)
11257                 return ret;
11258             if (!is_error(ret)) {
11259                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11260                 if (!target_grouplist)
11261                     return -TARGET_EFAULT;
11262                 for(i = 0;i < ret; i++)
11263                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11264                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11265             }
11266         }
11267         return ret;
11268     case TARGET_NR_setgroups:
11269         {
11270             int gidsetsize = arg1;
11271             target_id *target_grouplist;
11272             gid_t *grouplist = NULL;
11273             int i;
11274             if (gidsetsize) {
11275                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11276                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11277                 if (!target_grouplist) {
11278                     return -TARGET_EFAULT;
11279                 }
11280                 for (i = 0; i < gidsetsize; i++) {
11281                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11282                 }
11283                 unlock_user(target_grouplist, arg2, 0);
11284             }
11285             return get_errno(setgroups(gidsetsize, grouplist));
11286         }
11287     case TARGET_NR_fchown:
11288         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11289 #if defined(TARGET_NR_fchownat)
11290     case TARGET_NR_fchownat:
11291         if (!(p = lock_user_string(arg2)))
11292             return -TARGET_EFAULT;
11293         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11294                                  low2highgid(arg4), arg5));
11295         unlock_user(p, arg2, 0);
11296         return ret;
11297 #endif
11298 #ifdef TARGET_NR_setresuid
11299     case TARGET_NR_setresuid:
11300         return get_errno(sys_setresuid(low2highuid(arg1),
11301                                        low2highuid(arg2),
11302                                        low2highuid(arg3)));
11303 #endif
11304 #ifdef TARGET_NR_getresuid
11305     case TARGET_NR_getresuid:
11306         {
11307             uid_t ruid, euid, suid;
11308             ret = get_errno(getresuid(&ruid, &euid, &suid));
11309             if (!is_error(ret)) {
11310                 if (put_user_id(high2lowuid(ruid), arg1)
11311                     || put_user_id(high2lowuid(euid), arg2)
11312                     || put_user_id(high2lowuid(suid), arg3))
11313                     return -TARGET_EFAULT;
11314             }
11315         }
11316         return ret;
11317 #endif
11318 #ifdef TARGET_NR_getresgid
11319     case TARGET_NR_setresgid:
11320         return get_errno(sys_setresgid(low2highgid(arg1),
11321                                        low2highgid(arg2),
11322                                        low2highgid(arg3)));
11323 #endif
11324 #ifdef TARGET_NR_getresgid
11325     case TARGET_NR_getresgid:
11326         {
11327             gid_t rgid, egid, sgid;
11328             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11329             if (!is_error(ret)) {
11330                 if (put_user_id(high2lowgid(rgid), arg1)
11331                     || put_user_id(high2lowgid(egid), arg2)
11332                     || put_user_id(high2lowgid(sgid), arg3))
11333                     return -TARGET_EFAULT;
11334             }
11335         }
11336         return ret;
11337 #endif
11338 #ifdef TARGET_NR_chown
11339     case TARGET_NR_chown:
11340         if (!(p = lock_user_string(arg1)))
11341             return -TARGET_EFAULT;
11342         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11343         unlock_user(p, arg1, 0);
11344         return ret;
11345 #endif
11346     case TARGET_NR_setuid:
11347         return get_errno(sys_setuid(low2highuid(arg1)));
11348     case TARGET_NR_setgid:
11349         return get_errno(sys_setgid(low2highgid(arg1)));
11350     case TARGET_NR_setfsuid:
11351         return get_errno(setfsuid(arg1));
11352     case TARGET_NR_setfsgid:
11353         return get_errno(setfsgid(arg1));
11354 
11355 #ifdef TARGET_NR_lchown32
11356     case TARGET_NR_lchown32:
11357         if (!(p = lock_user_string(arg1)))
11358             return -TARGET_EFAULT;
11359         ret = get_errno(lchown(p, arg2, arg3));
11360         unlock_user(p, arg1, 0);
11361         return ret;
11362 #endif
11363 #ifdef TARGET_NR_getuid32
11364     case TARGET_NR_getuid32:
11365         return get_errno(getuid());
11366 #endif
11367 
11368 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11369    /* Alpha specific */
11370     case TARGET_NR_getxuid:
11371          {
11372             uid_t euid;
11373             euid=geteuid();
11374             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
11375          }
11376         return get_errno(getuid());
11377 #endif
11378 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11379    /* Alpha specific */
11380     case TARGET_NR_getxgid:
11381          {
11382             uid_t egid;
11383             egid=getegid();
11384             ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
11385          }
11386         return get_errno(getgid());
11387 #endif
11388 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11389     /* Alpha specific */
11390     case TARGET_NR_osf_getsysinfo:
11391         ret = -TARGET_EOPNOTSUPP;
11392         switch (arg1) {
11393           case TARGET_GSI_IEEE_FP_CONTROL:
11394             {
11395                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11396                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11397 
11398                 swcr &= ~SWCR_STATUS_MASK;
11399                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11400 
11401                 if (put_user_u64 (swcr, arg2))
11402                         return -TARGET_EFAULT;
11403                 ret = 0;
11404             }
11405             break;
11406 
11407           /* case GSI_IEEE_STATE_AT_SIGNAL:
11408              -- Not implemented in linux kernel.
11409              case GSI_UACPROC:
11410              -- Retrieves current unaligned access state; not much used.
11411              case GSI_PROC_TYPE:
11412              -- Retrieves implver information; surely not used.
11413              case GSI_GET_HWRPB:
11414              -- Grabs a copy of the HWRPB; surely not used.
11415           */
11416         }
11417         return ret;
11418 #endif
11419 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11420     /* Alpha specific */
11421     case TARGET_NR_osf_setsysinfo:
11422         ret = -TARGET_EOPNOTSUPP;
11423         switch (arg1) {
11424           case TARGET_SSI_IEEE_FP_CONTROL:
11425             {
11426                 uint64_t swcr, fpcr;
11427 
11428                 if (get_user_u64 (swcr, arg2)) {
11429                     return -TARGET_EFAULT;
11430                 }
11431 
11432                 /*
11433                  * The kernel calls swcr_update_status to update the
11434                  * status bits from the fpcr at every point that it
11435                  * could be queried.  Therefore, we store the status
11436                  * bits only in FPCR.
11437                  */
11438                 ((CPUAlphaState *)cpu_env)->swcr
11439                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11440 
11441                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11442                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11443                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11444                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11445                 ret = 0;
11446             }
11447             break;
11448 
11449           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11450             {
11451                 uint64_t exc, fpcr, fex;
11452 
11453                 if (get_user_u64(exc, arg2)) {
11454                     return -TARGET_EFAULT;
11455                 }
11456                 exc &= SWCR_STATUS_MASK;
11457                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11458 
11459                 /* Old exceptions are not signaled.  */
11460                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11461                 fex = exc & ~fex;
11462                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11463                 fex &= ((CPUArchState *)cpu_env)->swcr;
11464 
11465                 /* Update the hardware fpcr.  */
11466                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11467                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11468 
11469                 if (fex) {
11470                     int si_code = TARGET_FPE_FLTUNK;
11471                     target_siginfo_t info;
11472 
11473                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11474                         si_code = TARGET_FPE_FLTUND;
11475                     }
11476                     if (fex & SWCR_TRAP_ENABLE_INE) {
11477                         si_code = TARGET_FPE_FLTRES;
11478                     }
11479                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11480                         si_code = TARGET_FPE_FLTUND;
11481                     }
11482                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11483                         si_code = TARGET_FPE_FLTOVF;
11484                     }
11485                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11486                         si_code = TARGET_FPE_FLTDIV;
11487                     }
11488                     if (fex & SWCR_TRAP_ENABLE_INV) {
11489                         si_code = TARGET_FPE_FLTINV;
11490                     }
11491 
11492                     info.si_signo = SIGFPE;
11493                     info.si_errno = 0;
11494                     info.si_code = si_code;
11495                     info._sifields._sigfault._addr
11496                         = ((CPUArchState *)cpu_env)->pc;
11497                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11498                                  QEMU_SI_FAULT, &info);
11499                 }
11500                 ret = 0;
11501             }
11502             break;
11503 
11504           /* case SSI_NVPAIRS:
11505              -- Used with SSIN_UACPROC to enable unaligned accesses.
11506              case SSI_IEEE_STATE_AT_SIGNAL:
11507              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11508              -- Not implemented in linux kernel
11509           */
11510         }
11511         return ret;
11512 #endif
11513 #ifdef TARGET_NR_osf_sigprocmask
11514     /* Alpha specific.  */
11515     case TARGET_NR_osf_sigprocmask:
11516         {
11517             abi_ulong mask;
11518             int how;
11519             sigset_t set, oldset;
11520 
11521             switch(arg1) {
11522             case TARGET_SIG_BLOCK:
11523                 how = SIG_BLOCK;
11524                 break;
11525             case TARGET_SIG_UNBLOCK:
11526                 how = SIG_UNBLOCK;
11527                 break;
11528             case TARGET_SIG_SETMASK:
11529                 how = SIG_SETMASK;
11530                 break;
11531             default:
11532                 return -TARGET_EINVAL;
11533             }
11534             mask = arg2;
11535             target_to_host_old_sigset(&set, &mask);
11536             ret = do_sigprocmask(how, &set, &oldset);
11537             if (!ret) {
11538                 host_to_target_old_sigset(&mask, &oldset);
11539                 ret = mask;
11540             }
11541         }
11542         return ret;
11543 #endif
11544 
11545 #ifdef TARGET_NR_getgid32
11546     case TARGET_NR_getgid32:
11547         return get_errno(getgid());
11548 #endif
11549 #ifdef TARGET_NR_geteuid32
11550     case TARGET_NR_geteuid32:
11551         return get_errno(geteuid());
11552 #endif
11553 #ifdef TARGET_NR_getegid32
11554     case TARGET_NR_getegid32:
11555         return get_errno(getegid());
11556 #endif
11557 #ifdef TARGET_NR_setreuid32
11558     case TARGET_NR_setreuid32:
11559         return get_errno(setreuid(arg1, arg2));
11560 #endif
11561 #ifdef TARGET_NR_setregid32
11562     case TARGET_NR_setregid32:
11563         return get_errno(setregid(arg1, arg2));
11564 #endif
11565 #ifdef TARGET_NR_getgroups32
11566     case TARGET_NR_getgroups32:
11567         {
11568             int gidsetsize = arg1;
11569             uint32_t *target_grouplist;
11570             gid_t *grouplist;
11571             int i;
11572 
11573             grouplist = alloca(gidsetsize * sizeof(gid_t));
11574             ret = get_errno(getgroups(gidsetsize, grouplist));
11575             if (gidsetsize == 0)
11576                 return ret;
11577             if (!is_error(ret)) {
11578                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11579                 if (!target_grouplist) {
11580                     return -TARGET_EFAULT;
11581                 }
11582                 for(i = 0;i < ret; i++)
11583                     target_grouplist[i] = tswap32(grouplist[i]);
11584                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11585             }
11586         }
11587         return ret;
11588 #endif
11589 #ifdef TARGET_NR_setgroups32
11590     case TARGET_NR_setgroups32:
11591         {
11592             int gidsetsize = arg1;
11593             uint32_t *target_grouplist;
11594             gid_t *grouplist;
11595             int i;
11596 
11597             grouplist = alloca(gidsetsize * sizeof(gid_t));
11598             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11599             if (!target_grouplist) {
11600                 return -TARGET_EFAULT;
11601             }
11602             for(i = 0;i < gidsetsize; i++)
11603                 grouplist[i] = tswap32(target_grouplist[i]);
11604             unlock_user(target_grouplist, arg2, 0);
11605             return get_errno(setgroups(gidsetsize, grouplist));
11606         }
11607 #endif
11608 #ifdef TARGET_NR_fchown32
11609     case TARGET_NR_fchown32:
11610         return get_errno(fchown(arg1, arg2, arg3));
11611 #endif
11612 #ifdef TARGET_NR_setresuid32
11613     case TARGET_NR_setresuid32:
11614         return get_errno(sys_setresuid(arg1, arg2, arg3));
11615 #endif
11616 #ifdef TARGET_NR_getresuid32
11617     case TARGET_NR_getresuid32:
11618         {
11619             uid_t ruid, euid, suid;
11620             ret = get_errno(getresuid(&ruid, &euid, &suid));
11621             if (!is_error(ret)) {
11622                 if (put_user_u32(ruid, arg1)
11623                     || put_user_u32(euid, arg2)
11624                     || put_user_u32(suid, arg3))
11625                     return -TARGET_EFAULT;
11626             }
11627         }
11628         return ret;
11629 #endif
11630 #ifdef TARGET_NR_setresgid32
11631     case TARGET_NR_setresgid32:
11632         return get_errno(sys_setresgid(arg1, arg2, arg3));
11633 #endif
11634 #ifdef TARGET_NR_getresgid32
11635     case TARGET_NR_getresgid32:
11636         {
11637             gid_t rgid, egid, sgid;
11638             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11639             if (!is_error(ret)) {
11640                 if (put_user_u32(rgid, arg1)
11641                     || put_user_u32(egid, arg2)
11642                     || put_user_u32(sgid, arg3))
11643                     return -TARGET_EFAULT;
11644             }
11645         }
11646         return ret;
11647 #endif
11648 #ifdef TARGET_NR_chown32
11649     case TARGET_NR_chown32:
11650         if (!(p = lock_user_string(arg1)))
11651             return -TARGET_EFAULT;
11652         ret = get_errno(chown(p, arg2, arg3));
11653         unlock_user(p, arg1, 0);
11654         return ret;
11655 #endif
11656 #ifdef TARGET_NR_setuid32
11657     case TARGET_NR_setuid32:
11658         return get_errno(sys_setuid(arg1));
11659 #endif
11660 #ifdef TARGET_NR_setgid32
11661     case TARGET_NR_setgid32:
11662         return get_errno(sys_setgid(arg1));
11663 #endif
11664 #ifdef TARGET_NR_setfsuid32
11665     case TARGET_NR_setfsuid32:
11666         return get_errno(setfsuid(arg1));
11667 #endif
11668 #ifdef TARGET_NR_setfsgid32
11669     case TARGET_NR_setfsgid32:
11670         return get_errno(setfsgid(arg1));
11671 #endif
11672 #ifdef TARGET_NR_mincore
11673     case TARGET_NR_mincore:
11674         {
11675             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11676             if (!a) {
11677                 return -TARGET_ENOMEM;
11678             }
11679             p = lock_user_string(arg3);
11680             if (!p) {
11681                 ret = -TARGET_EFAULT;
11682             } else {
11683                 ret = get_errno(mincore(a, arg2, p));
11684                 unlock_user(p, arg3, ret);
11685             }
11686             unlock_user(a, arg1, 0);
11687         }
11688         return ret;
11689 #endif
11690 #ifdef TARGET_NR_arm_fadvise64_64
11691     case TARGET_NR_arm_fadvise64_64:
11692         /* arm_fadvise64_64 looks like fadvise64_64 but
11693          * with different argument order: fd, advice, offset, len
11694          * rather than the usual fd, offset, len, advice.
11695          * Note that offset and len are both 64-bit so appear as
11696          * pairs of 32-bit registers.
11697          */
11698         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11699                             target_offset64(arg5, arg6), arg2);
11700         return -host_to_target_errno(ret);
11701 #endif
11702 
11703 #if TARGET_ABI_BITS == 32
11704 
11705 #ifdef TARGET_NR_fadvise64_64
11706     case TARGET_NR_fadvise64_64:
11707 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11708         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11709         ret = arg2;
11710         arg2 = arg3;
11711         arg3 = arg4;
11712         arg4 = arg5;
11713         arg5 = arg6;
11714         arg6 = ret;
11715 #else
11716         /* 6 args: fd, offset (high, low), len (high, low), advice */
11717         if (regpairs_aligned(cpu_env, num)) {
11718             /* offset is in (3,4), len in (5,6) and advice in 7 */
11719             arg2 = arg3;
11720             arg3 = arg4;
11721             arg4 = arg5;
11722             arg5 = arg6;
11723             arg6 = arg7;
11724         }
11725 #endif
11726         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11727                             target_offset64(arg4, arg5), arg6);
11728         return -host_to_target_errno(ret);
11729 #endif
11730 
11731 #ifdef TARGET_NR_fadvise64
11732     case TARGET_NR_fadvise64:
11733         /* 5 args: fd, offset (high, low), len, advice */
11734         if (regpairs_aligned(cpu_env, num)) {
11735             /* offset is in (3,4), len in 5 and advice in 6 */
11736             arg2 = arg3;
11737             arg3 = arg4;
11738             arg4 = arg5;
11739             arg5 = arg6;
11740         }
11741         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11742         return -host_to_target_errno(ret);
11743 #endif
11744 
11745 #else /* not a 32-bit ABI */
11746 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11747 #ifdef TARGET_NR_fadvise64_64
11748     case TARGET_NR_fadvise64_64:
11749 #endif
11750 #ifdef TARGET_NR_fadvise64
11751     case TARGET_NR_fadvise64:
11752 #endif
11753 #ifdef TARGET_S390X
11754         switch (arg4) {
11755         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11756         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11757         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11758         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11759         default: break;
11760         }
11761 #endif
11762         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11763 #endif
11764 #endif /* end of 64-bit ABI fadvise handling */
11765 
11766 #ifdef TARGET_NR_madvise
11767     case TARGET_NR_madvise:
11768         /* A straight passthrough may not be safe because qemu sometimes
11769            turns private file-backed mappings into anonymous mappings.
11770            This will break MADV_DONTNEED.
11771            This is a hint, so ignoring and returning success is ok.  */
11772         return 0;
11773 #endif
11774 #ifdef TARGET_NR_fcntl64
11775     case TARGET_NR_fcntl64:
11776     {
11777         int cmd;
11778         struct flock64 fl;
11779         from_flock64_fn *copyfrom = copy_from_user_flock64;
11780         to_flock64_fn *copyto = copy_to_user_flock64;
11781 
11782 #ifdef TARGET_ARM
11783         if (!((CPUARMState *)cpu_env)->eabi) {
11784             copyfrom = copy_from_user_oabi_flock64;
11785             copyto = copy_to_user_oabi_flock64;
11786         }
11787 #endif
11788 
11789         cmd = target_to_host_fcntl_cmd(arg2);
11790         if (cmd == -TARGET_EINVAL) {
11791             return cmd;
11792         }
11793 
11794         switch(arg2) {
11795         case TARGET_F_GETLK64:
11796             ret = copyfrom(&fl, arg3);
11797             if (ret) {
11798                 break;
11799             }
11800             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11801             if (ret == 0) {
11802                 ret = copyto(arg3, &fl);
11803             }
11804 	    break;
11805 
11806         case TARGET_F_SETLK64:
11807         case TARGET_F_SETLKW64:
11808             ret = copyfrom(&fl, arg3);
11809             if (ret) {
11810                 break;
11811             }
11812             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11813 	    break;
11814         default:
11815             ret = do_fcntl(arg1, arg2, arg3);
11816             break;
11817         }
11818         return ret;
11819     }
11820 #endif
11821 #ifdef TARGET_NR_cacheflush
11822     case TARGET_NR_cacheflush:
11823         /* self-modifying code is handled automatically, so nothing needed */
11824         return 0;
11825 #endif
11826 #ifdef TARGET_NR_getpagesize
11827     case TARGET_NR_getpagesize:
11828         return TARGET_PAGE_SIZE;
11829 #endif
11830     case TARGET_NR_gettid:
11831         return get_errno(sys_gettid());
11832 #ifdef TARGET_NR_readahead
11833     case TARGET_NR_readahead:
11834 #if TARGET_ABI_BITS == 32
11835         if (regpairs_aligned(cpu_env, num)) {
11836             arg2 = arg3;
11837             arg3 = arg4;
11838             arg4 = arg5;
11839         }
11840         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11841 #else
11842         ret = get_errno(readahead(arg1, arg2, arg3));
11843 #endif
11844         return ret;
11845 #endif
11846 #ifdef CONFIG_ATTR
11847 #ifdef TARGET_NR_setxattr
11848     case TARGET_NR_listxattr:
11849     case TARGET_NR_llistxattr:
11850     {
11851         void *p, *b = 0;
11852         if (arg2) {
11853             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11854             if (!b) {
11855                 return -TARGET_EFAULT;
11856             }
11857         }
11858         p = lock_user_string(arg1);
11859         if (p) {
11860             if (num == TARGET_NR_listxattr) {
11861                 ret = get_errno(listxattr(p, b, arg3));
11862             } else {
11863                 ret = get_errno(llistxattr(p, b, arg3));
11864             }
11865         } else {
11866             ret = -TARGET_EFAULT;
11867         }
11868         unlock_user(p, arg1, 0);
11869         unlock_user(b, arg2, arg3);
11870         return ret;
11871     }
11872     case TARGET_NR_flistxattr:
11873     {
11874         void *b = 0;
11875         if (arg2) {
11876             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11877             if (!b) {
11878                 return -TARGET_EFAULT;
11879             }
11880         }
11881         ret = get_errno(flistxattr(arg1, b, arg3));
11882         unlock_user(b, arg2, arg3);
11883         return ret;
11884     }
11885     case TARGET_NR_setxattr:
11886     case TARGET_NR_lsetxattr:
11887         {
11888             void *p, *n, *v = 0;
11889             if (arg3) {
11890                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11891                 if (!v) {
11892                     return -TARGET_EFAULT;
11893                 }
11894             }
11895             p = lock_user_string(arg1);
11896             n = lock_user_string(arg2);
11897             if (p && n) {
11898                 if (num == TARGET_NR_setxattr) {
11899                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11900                 } else {
11901                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11902                 }
11903             } else {
11904                 ret = -TARGET_EFAULT;
11905             }
11906             unlock_user(p, arg1, 0);
11907             unlock_user(n, arg2, 0);
11908             unlock_user(v, arg3, 0);
11909         }
11910         return ret;
11911     case TARGET_NR_fsetxattr:
11912         {
11913             void *n, *v = 0;
11914             if (arg3) {
11915                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11916                 if (!v) {
11917                     return -TARGET_EFAULT;
11918                 }
11919             }
11920             n = lock_user_string(arg2);
11921             if (n) {
11922                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11923             } else {
11924                 ret = -TARGET_EFAULT;
11925             }
11926             unlock_user(n, arg2, 0);
11927             unlock_user(v, arg3, 0);
11928         }
11929         return ret;
11930     case TARGET_NR_getxattr:
11931     case TARGET_NR_lgetxattr:
11932         {
11933             void *p, *n, *v = 0;
11934             if (arg3) {
11935                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11936                 if (!v) {
11937                     return -TARGET_EFAULT;
11938                 }
11939             }
11940             p = lock_user_string(arg1);
11941             n = lock_user_string(arg2);
11942             if (p && n) {
11943                 if (num == TARGET_NR_getxattr) {
11944                     ret = get_errno(getxattr(p, n, v, arg4));
11945                 } else {
11946                     ret = get_errno(lgetxattr(p, n, v, arg4));
11947                 }
11948             } else {
11949                 ret = -TARGET_EFAULT;
11950             }
11951             unlock_user(p, arg1, 0);
11952             unlock_user(n, arg2, 0);
11953             unlock_user(v, arg3, arg4);
11954         }
11955         return ret;
11956     case TARGET_NR_fgetxattr:
11957         {
11958             void *n, *v = 0;
11959             if (arg3) {
11960                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11961                 if (!v) {
11962                     return -TARGET_EFAULT;
11963                 }
11964             }
11965             n = lock_user_string(arg2);
11966             if (n) {
11967                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11968             } else {
11969                 ret = -TARGET_EFAULT;
11970             }
11971             unlock_user(n, arg2, 0);
11972             unlock_user(v, arg3, arg4);
11973         }
11974         return ret;
11975     case TARGET_NR_removexattr:
11976     case TARGET_NR_lremovexattr:
11977         {
11978             void *p, *n;
11979             p = lock_user_string(arg1);
11980             n = lock_user_string(arg2);
11981             if (p && n) {
11982                 if (num == TARGET_NR_removexattr) {
11983                     ret = get_errno(removexattr(p, n));
11984                 } else {
11985                     ret = get_errno(lremovexattr(p, n));
11986                 }
11987             } else {
11988                 ret = -TARGET_EFAULT;
11989             }
11990             unlock_user(p, arg1, 0);
11991             unlock_user(n, arg2, 0);
11992         }
11993         return ret;
11994     case TARGET_NR_fremovexattr:
11995         {
11996             void *n;
11997             n = lock_user_string(arg2);
11998             if (n) {
11999                 ret = get_errno(fremovexattr(arg1, n));
12000             } else {
12001                 ret = -TARGET_EFAULT;
12002             }
12003             unlock_user(n, arg2, 0);
12004         }
12005         return ret;
12006 #endif
12007 #endif /* CONFIG_ATTR */
12008 #ifdef TARGET_NR_set_thread_area
12009     case TARGET_NR_set_thread_area:
12010 #if defined(TARGET_MIPS)
12011       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12012       return 0;
12013 #elif defined(TARGET_CRIS)
12014       if (arg1 & 0xff)
12015           ret = -TARGET_EINVAL;
12016       else {
12017           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12018           ret = 0;
12019       }
12020       return ret;
12021 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12022       return do_set_thread_area(cpu_env, arg1);
12023 #elif defined(TARGET_M68K)
12024       {
12025           TaskState *ts = cpu->opaque;
12026           ts->tp_value = arg1;
12027           return 0;
12028       }
12029 #else
12030       return -TARGET_ENOSYS;
12031 #endif
12032 #endif
12033 #ifdef TARGET_NR_get_thread_area
12034     case TARGET_NR_get_thread_area:
12035 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12036         return do_get_thread_area(cpu_env, arg1);
12037 #elif defined(TARGET_M68K)
12038         {
12039             TaskState *ts = cpu->opaque;
12040             return ts->tp_value;
12041         }
12042 #else
12043         return -TARGET_ENOSYS;
12044 #endif
12045 #endif
12046 #ifdef TARGET_NR_getdomainname
12047     case TARGET_NR_getdomainname:
12048         return -TARGET_ENOSYS;
12049 #endif
12050 
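          /*
           * The clock_* syscalls convert struct timespec between the guest ABI
           * layout and the host's; the *_time64 variants use the 64-bit time_t
           * layout that 32-bit guests pass for y2038-safe calls.
           */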
12051 #ifdef TARGET_NR_clock_settime
12052     case TARGET_NR_clock_settime:
12053     {
12054         struct timespec ts;
12055 
12056         ret = target_to_host_timespec(&ts, arg2);
12057         if (!is_error(ret)) {
12058             ret = get_errno(clock_settime(arg1, &ts));
12059         }
12060         return ret;
12061     }
12062 #endif
12063 #ifdef TARGET_NR_clock_settime64
12064     case TARGET_NR_clock_settime64:
12065     {
12066         struct timespec ts;
12067 
12068         ret = target_to_host_timespec64(&ts, arg2);
12069         if (!is_error(ret)) {
12070             ret = get_errno(clock_settime(arg1, &ts));
12071         }
12072         return ret;
12073     }
12074 #endif
12075 #ifdef TARGET_NR_clock_gettime
12076     case TARGET_NR_clock_gettime:
12077     {
12078         struct timespec ts;
12079         ret = get_errno(clock_gettime(arg1, &ts));
12080         if (!is_error(ret)) {
12081             ret = host_to_target_timespec(arg2, &ts);
12082         }
12083         return ret;
12084     }
12085 #endif
12086 #ifdef TARGET_NR_clock_gettime64
12087     case TARGET_NR_clock_gettime64:
12088     {
12089         struct timespec ts;
12090         ret = get_errno(clock_gettime(arg1, &ts));
12091         if (!is_error(ret)) {
12092             ret = host_to_target_timespec64(arg2, &ts);
12093         }
12094         return ret;
12095     }
12096 #endif
12097 #ifdef TARGET_NR_clock_getres
12098     case TARGET_NR_clock_getres:
12099     {
12100         struct timespec ts;
12101         ret = get_errno(clock_getres(arg1, &ts));
12102         if (!is_error(ret)) {
12103             ret = host_to_target_timespec(arg2, &ts);
12104         }
12105         return ret;
12106     }
12107 #endif
12108 #ifdef TARGET_NR_clock_getres_time64
12109     case TARGET_NR_clock_getres_time64:
12110     {
12111         struct timespec ts;
12112         ret = get_errno(clock_getres(arg1, &ts));
12113         if (!is_error(ret)) {
12114             ret = host_to_target_timespec64(arg2, &ts);
12115         }
12116         return ret;
12117     }
12118 #endif
12119 #ifdef TARGET_NR_clock_nanosleep
12120     case TARGET_NR_clock_nanosleep:
12121     {
12122         struct timespec ts;
12123         if (target_to_host_timespec(&ts, arg3)) {
12124             return -TARGET_EFAULT;
12125         }
12126         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12127                                              &ts, arg4 ? &ts : NULL));
12128         /*
12129          * If the call is interrupted by a signal handler, it fails with
12130          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is
12131          * not TIMER_ABSTIME, the remaining unslept time is written to arg4.
12132          */
12133         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12134             host_to_target_timespec(arg4, &ts)) {
12135               return -TARGET_EFAULT;
12136         }
12137 
12138         return ret;
12139     }
12140 #endif
12141 #ifdef TARGET_NR_clock_nanosleep_time64
12142     case TARGET_NR_clock_nanosleep_time64:
12143     {
12144         struct timespec ts;
12145 
12146         if (target_to_host_timespec64(&ts, arg3)) {
12147             return -TARGET_EFAULT;
12148         }
12149 
12150         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12151                                              &ts, arg4 ? &ts : NULL));
12152 
12153         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12154             host_to_target_timespec64(arg4, &ts)) {
12155             return -TARGET_EFAULT;
12156         }
12157         return ret;
12158     }
12159 #endif
12160 
12161 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12162     case TARGET_NR_set_tid_address:
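              /*
               * g2h() hands the host kernel the host view of the guest address,
               * so the clear-on-exit write lands in guest-visible memory.
               */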
12163         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12164 #endif
12165 
12166     case TARGET_NR_tkill:
12167         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12168 
12169     case TARGET_NR_tgkill:
12170         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12171                          target_to_host_signal(arg3)));
12172 
12173 #ifdef TARGET_NR_set_robust_list
12174     case TARGET_NR_set_robust_list:
12175     case TARGET_NR_get_robust_list:
12176         /* The ABI for supporting robust futexes has userspace pass
12177          * the kernel a pointer to a linked list which is updated by
12178          * userspace after the syscall; the list is walked by the kernel
12179          * when the thread exits. Since the linked list in QEMU guest
12180          * memory isn't a valid linked list for the host and we have
12181          * no way to reliably intercept the thread-death event, we can't
12182          * support these. Silently return ENOSYS so that guest userspace
12183          * falls back to a non-robust futex implementation (which should
12184          * be OK except in the corner case of the guest crashing while
12185          * holding a mutex that is shared with another process via
12186          * shared memory).
12187          */
12188         return -TARGET_ENOSYS;
12189 #endif
12190 
12191 #if defined(TARGET_NR_utimensat)
12192     case TARGET_NR_utimensat:
12193         {
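                  /*
                   * arg3, when non-zero, points to two target timespecs:
                   * atime first, then mtime.
                   */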
12194             struct timespec *tsp, ts[2];
12195             if (!arg3) {
12196                 tsp = NULL;
12197             } else {
12198                 if (target_to_host_timespec(ts, arg3)) {
12199                     return -TARGET_EFAULT;
12200                 }
12201                 if (target_to_host_timespec(ts + 1, arg3 +
12202                                             sizeof(struct target_timespec))) {
12203                     return -TARGET_EFAULT;
12204                 }
12205                 tsp = ts;
12206             }
12207             if (!arg2) {
12208                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12209             } else {
12210                 if (!(p = lock_user_string(arg2))) {
12211                     return -TARGET_EFAULT;
12212                 }
12213                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12214                 unlock_user(p, arg2, 0);
12215             }
12216         }
12217         return ret;
12218 #endif
12219 #ifdef TARGET_NR_utimensat_time64
12220     case TARGET_NR_utimensat_time64:
12221         {
12222             struct timespec *tsp, ts[2];
12223             if (!arg3) {
12224                 tsp = NULL;
12225             } else {
12226                 if (target_to_host_timespec64(ts, arg3)) {
12227                     return -TARGET_EFAULT;
12228                 }
12229                 if (target_to_host_timespec64(ts + 1, arg3 +
12230                                      sizeof(struct target__kernel_timespec))) {
12231                     return -TARGET_EFAULT;
12232                 }
12233                 tsp = ts;
12234             }
12235             if (!arg2) {
12236                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12237             } else {
12238                 p = lock_user_string(arg2);
12239                 if (!p) {
12240                     return -TARGET_EFAULT;
12241                 }
12242                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12243                 unlock_user(p, arg2, 0);
12244             }
12245         }
12246         return ret;
12247 #endif
12248 #ifdef TARGET_NR_futex
12249     case TARGET_NR_futex:
12250         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12251 #endif
12252 #ifdef TARGET_NR_futex_time64
12253     case TARGET_NR_futex_time64:
12254         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12255 #endif
12256 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12257     case TARGET_NR_inotify_init:
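              /*
               * Register a read-side translator on the new fd so host inotify
               * events are converted to the target's struct inotify_event layout.
               */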
12258         ret = get_errno(sys_inotify_init());
12259         if (ret >= 0) {
12260             fd_trans_register(ret, &target_inotify_trans);
12261         }
12262         return ret;
12263 #endif
12264 #ifdef CONFIG_INOTIFY1
12265 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12266     case TARGET_NR_inotify_init1:
12267         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12268                                           fcntl_flags_tbl)));
12269         if (ret >= 0) {
12270             fd_trans_register(ret, &target_inotify_trans);
12271         }
12272         return ret;
12273 #endif
12274 #endif
12275 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12276     case TARGET_NR_inotify_add_watch:
12277         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12278         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12279         unlock_user(p, arg2, 0);
12280         return ret;
12281 #endif
12282 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12283     case TARGET_NR_inotify_rm_watch:
12284         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12285 #endif
12286 
12287 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12288     case TARGET_NR_mq_open:
12289         {
12290             struct mq_attr posix_mq_attr;
12291             struct mq_attr *pposix_mq_attr;
12292             int host_flags;
12293 
12294             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12295             pposix_mq_attr = NULL;
12296             if (arg4) {
12297                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12298                     return -TARGET_EFAULT;
12299                 }
12300                 pposix_mq_attr = &posix_mq_attr;
12301             }
12302             p = lock_user_string(arg1 - 1);
12303             if (!p) {
12304                 return -TARGET_EFAULT;
12305             }
12306             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12307             unlock_user(p, arg1, 0);
12308         }
12309         return ret;
12310 
12311     case TARGET_NR_mq_unlink:
12312         p = lock_user_string(arg1 - 1);
12313         if (!p) {
12314             return -TARGET_EFAULT;
12315         }
12316         ret = get_errno(mq_unlink(p));
12317         unlock_user(p, arg1, 0);
12318         return ret;
12319 
12320 #ifdef TARGET_NR_mq_timedsend
12321     case TARGET_NR_mq_timedsend:
12322         {
12323             struct timespec ts;
12324 
12325             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12326             if (arg5 != 0) {
12327                 if (target_to_host_timespec(&ts, arg5)) {
12328                     return -TARGET_EFAULT;
12329                 }
12330                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12331                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12332                     return -TARGET_EFAULT;
12333                 }
12334             } else {
12335                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12336             }
12337             unlock_user(p, arg2, arg3);
12338         }
12339         return ret;
12340 #endif
12341 #ifdef TARGET_NR_mq_timedsend_time64
12342     case TARGET_NR_mq_timedsend_time64:
12343         {
12344             struct timespec ts;
12345 
12346             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12347             if (arg5 != 0) {
12348                 if (target_to_host_timespec64(&ts, arg5)) {
12349                     return -TARGET_EFAULT;
12350                 }
12351                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12352                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12353                     return -TARGET_EFAULT;
12354                 }
12355             } else {
12356                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12357             }
12358             unlock_user(p, arg2, arg3);
12359         }
12360         return ret;
12361 #endif
12362 
12363 #ifdef TARGET_NR_mq_timedreceive
12364     case TARGET_NR_mq_timedreceive:
12365         {
12366             struct timespec ts;
12367             unsigned int prio;
12368 
12369             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12370             if (arg5 != 0) {
12371                 if (target_to_host_timespec(&ts, arg5)) {
12372                     return -TARGET_EFAULT;
12373                 }
12374                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12375                                                      &prio, &ts));
12376                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12377                     return -TARGET_EFAULT;
12378                 }
12379             } else {
12380                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12381                                                      &prio, NULL));
12382             }
12383             unlock_user(p, arg2, arg3);
12384             if (arg4 != 0) {
12385                 put_user_u32(prio, arg4);
                  }
12386         }
12387         return ret;
12388 #endif
12389 #ifdef TARGET_NR_mq_timedreceive_time64
12390     case TARGET_NR_mq_timedreceive_time64:
12391         {
12392             struct timespec ts;
12393             unsigned int prio;
12394 
12395             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12396             if (arg5 != 0) {
12397                 if (target_to_host_timespec64(&ts, arg5)) {
12398                     return -TARGET_EFAULT;
12399                 }
12400                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12401                                                      &prio, &ts));
12402                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12403                     return -TARGET_EFAULT;
12404                 }
12405             } else {
12406                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12407                                                      &prio, NULL));
12408             }
12409             unlock_user(p, arg2, arg3);
12410             if (arg4 != 0) {
12411                 put_user_u32(prio, arg4);
12412             }
12413         }
12414         return ret;
12415 #endif
12416 
12417     /* Not implemented for now... */
12418 /*     case TARGET_NR_mq_notify: */
12419 /*         break; */
12420 
12421     case TARGET_NR_mq_getsetattr:
12422         {
12423             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12424             ret = 0;
12425             if (arg2 != 0) {
12426                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12427                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12428                                            &posix_mq_attr_out));
12429             } else if (arg3 != 0) {
12430                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12431             }
12432             if (ret == 0 && arg3 != 0) {
12433                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12434             }
12435         }
12436         return ret;
12437 #endif
12438 
12439 #ifdef CONFIG_SPLICE
12440 #ifdef TARGET_NR_tee
12441     case TARGET_NR_tee:
12442         {
12443             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12444         }
12445         return ret;
12446 #endif
12447 #ifdef TARGET_NR_splice
12448     case TARGET_NR_splice:
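              /*
               * The optional 64-bit in/out offsets are copied in from guest
               * memory, passed to the host by pointer, and copied back afterwards.
               */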
12449         {
12450             loff_t loff_in, loff_out;
12451             loff_t *ploff_in = NULL, *ploff_out = NULL;
12452             if (arg2) {
12453                 if (get_user_u64(loff_in, arg2)) {
12454                     return -TARGET_EFAULT;
12455                 }
12456                 ploff_in = &loff_in;
12457             }
12458             if (arg4) {
12459                 if (get_user_u64(loff_out, arg4)) {
12460                     return -TARGET_EFAULT;
12461                 }
12462                 ploff_out = &loff_out;
12463             }
12464             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12465             if (arg2) {
12466                 if (put_user_u64(loff_in, arg2)) {
12467                     return -TARGET_EFAULT;
12468                 }
12469             }
12470             if (arg4) {
12471                 if (put_user_u64(loff_out, arg4)) {
12472                     return -TARGET_EFAULT;
12473                 }
12474             }
12475         }
12476         return ret;
12477 #endif
12478 #ifdef TARGET_NR_vmsplice
12479     case TARGET_NR_vmsplice:
12480         {
12481             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12482             if (vec != NULL) {
12483                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12484                 unlock_iovec(vec, arg2, arg3, 0);
12485             } else {
12486                 ret = -host_to_target_errno(errno);
12487             }
12488         }
12489         return ret;
12490 #endif
12491 #endif /* CONFIG_SPLICE */
12492 #ifdef CONFIG_EVENTFD
12493 #if defined(TARGET_NR_eventfd)
12494     case TARGET_NR_eventfd:
12495         ret = get_errno(eventfd(arg1, 0));
12496         if (ret >= 0) {
12497             fd_trans_register(ret, &target_eventfd_trans);
12498         }
12499         return ret;
12500 #endif
12501 #if defined(TARGET_NR_eventfd2)
12502     case TARGET_NR_eventfd2:
12503     {
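              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
               * and O_CLOEXEC, so the guest flags are translated via the open()
               * flag constants here.
               */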
12504         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12505         if (arg2 & TARGET_O_NONBLOCK) {
12506             host_flags |= O_NONBLOCK;
12507         }
12508         if (arg2 & TARGET_O_CLOEXEC) {
12509             host_flags |= O_CLOEXEC;
12510         }
12511         ret = get_errno(eventfd(arg1, host_flags));
12512         if (ret >= 0) {
12513             fd_trans_register(ret, &target_eventfd_trans);
12514         }
12515         return ret;
12516     }
12517 #endif
12518 #endif /* CONFIG_EVENTFD  */
12519 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12520     case TARGET_NR_fallocate:
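              /*
               * On 32-bit ABIs the 64-bit offset and length each arrive split
               * across two registers and are reassembled with target_offset64().
               */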
12521 #if TARGET_ABI_BITS == 32
12522         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12523                                   target_offset64(arg5, arg6)));
12524 #else
12525         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12526 #endif
12527         return ret;
12528 #endif
12529 #if defined(CONFIG_SYNC_FILE_RANGE)
12530 #if defined(TARGET_NR_sync_file_range)
12531     case TARGET_NR_sync_file_range:
12532 #if TARGET_ABI_BITS == 32
12533 #if defined(TARGET_MIPS)
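              /*
               * MIPS o32 passes 64-bit arguments in aligned register pairs, so a
               * padding word follows the fd and the offset/nbytes start at arg3.
               */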
12534         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12535                                         target_offset64(arg5, arg6), arg7));
12536 #else
12537         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12538                                         target_offset64(arg4, arg5), arg6));
12539 #endif /* !TARGET_MIPS */
12540 #else
12541         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12542 #endif
12543         return ret;
12544 #endif
12545 #if defined(TARGET_NR_sync_file_range2) || \
12546     defined(TARGET_NR_arm_sync_file_range)
12547 #if defined(TARGET_NR_sync_file_range2)
12548     case TARGET_NR_sync_file_range2:
12549 #endif
12550 #if defined(TARGET_NR_arm_sync_file_range)
12551     case TARGET_NR_arm_sync_file_range:
12552 #endif
12553         /* This is like sync_file_range but the arguments are reordered */
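              /*
               * (flags moves up to arg2 so that the two 64-bit values land in
               * aligned register pairs on 32-bit ABIs.)
               */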
12554 #if TARGET_ABI_BITS == 32
12555         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12556                                         target_offset64(arg5, arg6), arg2));
12557 #else
12558         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12559 #endif
12560         return ret;
12561 #endif
12562 #endif
12563 #if defined(TARGET_NR_signalfd4)
12564     case TARGET_NR_signalfd4:
12565         return do_signalfd4(arg1, arg2, arg4);
12566 #endif
12567 #if defined(TARGET_NR_signalfd)
12568     case TARGET_NR_signalfd:
12569         return do_signalfd4(arg1, arg2, 0);
12570 #endif
12571 #if defined(CONFIG_EPOLL)
12572 #if defined(TARGET_NR_epoll_create)
12573     case TARGET_NR_epoll_create:
12574         return get_errno(epoll_create(arg1));
12575 #endif
12576 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12577     case TARGET_NR_epoll_create1:
12578         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12579 #endif
12580 #if defined(TARGET_NR_epoll_ctl)
12581     case TARGET_NR_epoll_ctl:
12582     {
12583         struct epoll_event ep;
12584         struct epoll_event *epp = 0;
12585         if (arg4) {
12586             if (arg2 != EPOLL_CTL_DEL) {
12587                 struct target_epoll_event *target_ep;
12588                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12589                     return -TARGET_EFAULT;
12590                 }
12591                 ep.events = tswap32(target_ep->events);
12592                 /*
12593                  * The epoll_data_t union is just opaque data to the kernel,
12594                  * so we transfer all 64 bits across and need not worry what
12595                  * actual data type it is.
12596                  */
12597                 ep.data.u64 = tswap64(target_ep->data.u64);
12598                 unlock_user_struct(target_ep, arg4, 0);
12599             }
12600             /*
12601              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12602              * a non-NULL pointer even though the argument is ignored, so
12603              * pass a dummy &ep rather than NULL whenever arg4 is set.
12604              */
12605             epp = &ep;
12606         }
12607         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12608     }
12609 #endif
12610 
12611 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12612 #if defined(TARGET_NR_epoll_wait)
12613     case TARGET_NR_epoll_wait:
12614 #endif
12615 #if defined(TARGET_NR_epoll_pwait)
12616     case TARGET_NR_epoll_pwait:
12617 #endif
12618     {
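              /*
               * Both variants funnel into safe_epoll_pwait(); plain epoll_wait
               * simply passes a NULL signal mask.
               */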
12619         struct target_epoll_event *target_ep;
12620         struct epoll_event *ep;
12621         int epfd = arg1;
12622         int maxevents = arg3;
12623         int timeout = arg4;
12624 
12625         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12626             return -TARGET_EINVAL;
12627         }
12628 
12629         target_ep = lock_user(VERIFY_WRITE, arg2,
12630                               maxevents * sizeof(struct target_epoll_event), 1);
12631         if (!target_ep) {
12632             return -TARGET_EFAULT;
12633         }
12634 
12635         ep = g_try_new(struct epoll_event, maxevents);
12636         if (!ep) {
12637             unlock_user(target_ep, arg2, 0);
12638             return -TARGET_ENOMEM;
12639         }
12640 
12641         switch (num) {
12642 #if defined(TARGET_NR_epoll_pwait)
12643         case TARGET_NR_epoll_pwait:
12644         {
12645             target_sigset_t *target_set;
12646             sigset_t _set, *set = &_set;
12647 
12648             if (arg5) {
12649                 if (arg6 != sizeof(target_sigset_t)) {
12650                     ret = -TARGET_EINVAL;
12651                     break;
12652                 }
12653 
12654                 target_set = lock_user(VERIFY_READ, arg5,
12655                                        sizeof(target_sigset_t), 1);
12656                 if (!target_set) {
12657                     ret = -TARGET_EFAULT;
12658                     break;
12659                 }
12660                 target_to_host_sigset(set, target_set);
12661                 unlock_user(target_set, arg5, 0);
12662             } else {
12663                 set = NULL;
12664             }
12665 
12666             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12667                                              set, SIGSET_T_SIZE));
12668             break;
12669         }
12670 #endif
12671 #if defined(TARGET_NR_epoll_wait)
12672         case TARGET_NR_epoll_wait:
12673             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12674                                              NULL, 0));
12675             break;
12676 #endif
12677         default:
12678             ret = -TARGET_ENOSYS;
12679         }
12680         if (!is_error(ret)) {
12681             int i;
12682             for (i = 0; i < ret; i++) {
12683                 target_ep[i].events = tswap32(ep[i].events);
12684                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12685             }
12686             unlock_user(target_ep, arg2,
12687                         ret * sizeof(struct target_epoll_event));
12688         } else {
12689             unlock_user(target_ep, arg2, 0);
12690         }
12691         g_free(ep);
12692         return ret;
12693     }
12694 #endif
12695 #endif
12696 #ifdef TARGET_NR_prlimit64
12697     case TARGET_NR_prlimit64:
12698     {
12699         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
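              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host, since clamping QEMU's own address space
               * or stack could break the emulation; old limits are still
               * reported for those resources.
               */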
12700         struct target_rlimit64 *target_rnew, *target_rold;
12701         struct host_rlimit64 rnew, rold, *rnewp = 0;
12702         int resource = target_to_host_resource(arg2);
12703 
12704         if (arg3 && (resource != RLIMIT_AS &&
12705                      resource != RLIMIT_DATA &&
12706                      resource != RLIMIT_STACK)) {
12707             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12708                 return -TARGET_EFAULT;
12709             }
12710             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12711             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12712             unlock_user_struct(target_rnew, arg3, 0);
12713             rnewp = &rnew;
12714         }
12715 
12716         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12717         if (!is_error(ret) && arg4) {
12718             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12719                 return -TARGET_EFAULT;
12720             }
12721             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12722             target_rold->rlim_max = tswap64(rold.rlim_max);
12723             unlock_user_struct(target_rold, arg4, 1);
12724         }
12725         return ret;
12726     }
12727 #endif
12728 #ifdef TARGET_NR_gethostname
12729     case TARGET_NR_gethostname:
12730     {
12731         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12732         if (name) {
12733             ret = get_errno(gethostname(name, arg2));
12734             unlock_user(name, arg1, arg2);
12735         } else {
12736             ret = -TARGET_EFAULT;
12737         }
12738         return ret;
12739     }
12740 #endif
12741 #ifdef TARGET_NR_atomic_cmpxchg_32
12742     case TARGET_NR_atomic_cmpxchg_32:
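          /*
           * Guest compare-and-swap helper: compare the word at guest address
           * arg6 with arg2 and, if equal, store arg1 there; the old memory
           * value is returned either way.
           */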
12743     {
12744         /* should use start_exclusive from main.c */
12745         abi_ulong mem_value;
12746         if (get_user_u32(mem_value, arg6)) {
12747             target_siginfo_t info;
12748             info.si_signo = SIGSEGV;
12749             info.si_errno = 0;
12750             info.si_code = TARGET_SEGV_MAPERR;
12751             info._sifields._sigfault._addr = arg6;
12752             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12753                          QEMU_SI_FAULT, &info);
12754             ret = 0xdeadbeef;
12755             return ret;
12756         }
12757         if (mem_value == arg2) {
12758             put_user_u32(arg1, arg6);
              }
12759         return mem_value;
12760     }
12761 #endif
12762 #ifdef TARGET_NR_atomic_barrier
12763     case TARGET_NR_atomic_barrier:
12764         /* Like the kernel implementation and the QEMU ARM barrier,
12765            treat this as a no-op. */
12766         return 0;
12767 #endif
12768 
12769 #ifdef TARGET_NR_timer_create
12770     case TARGET_NR_timer_create:
12771     {
12772         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
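              /*
               * Host timers live in the g_posix_timers[] table; the id handed
               * back to the guest is the table index tagged with TIMER_MAGIC so
               * that stale or forged ids can be rejected by get_timer_id().
               */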
12773 
12774         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12775 
12776         int clkid = arg1;
12777         int timer_index = next_free_host_timer();
12778 
12779         if (timer_index < 0) {
12780             ret = -TARGET_EAGAIN;
12781         } else {
12782             timer_t *phtimer = g_posix_timers + timer_index;
12783 
12784             if (arg2) {
12785                 phost_sevp = &host_sevp;
12786                 ret = target_to_host_sigevent(phost_sevp, arg2);
12787                 if (ret != 0) {
12788                     return ret;
12789                 }
12790             }
12791 
12792             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12793             if (ret) {
12794                 phtimer = NULL;
12795             } else {
12796                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12797                     return -TARGET_EFAULT;
12798                 }
12799             }
12800         }
12801         return ret;
12802     }
12803 #endif
12804 
12805 #ifdef TARGET_NR_timer_settime
12806     case TARGET_NR_timer_settime:
12807     {
12808         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12809          * struct itimerspec * old_value */
12810         target_timer_t timerid = get_timer_id(arg1);
12811 
12812         if (timerid < 0) {
12813             ret = timerid;
12814         } else if (arg3 == 0) {
12815             ret = -TARGET_EINVAL;
12816         } else {
12817             timer_t htimer = g_posix_timers[timerid];
12818             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12819 
12820             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12821                 return -TARGET_EFAULT;
12822             }
12823             ret = get_errno(
12824                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12825             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12826                 return -TARGET_EFAULT;
12827             }
12828         }
12829         return ret;
12830     }
12831 #endif
12832 
12833 #ifdef TARGET_NR_timer_settime64
12834     case TARGET_NR_timer_settime64:
12835     {
12836         target_timer_t timerid = get_timer_id(arg1);
12837 
12838         if (timerid < 0) {
12839             ret = timerid;
12840         } else if (arg3 == 0) {
12841             ret = -TARGET_EINVAL;
12842         } else {
12843             timer_t htimer = g_posix_timers[timerid];
12844             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12845 
12846             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12847                 return -TARGET_EFAULT;
12848             }
12849             ret = get_errno(
12850                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12851             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12852                 return -TARGET_EFAULT;
12853             }
12854         }
12855         return ret;
12856     }
12857 #endif
12858 
12859 #ifdef TARGET_NR_timer_gettime
12860     case TARGET_NR_timer_gettime:
12861     {
12862         /* args: timer_t timerid, struct itimerspec *curr_value */
12863         target_timer_t timerid = get_timer_id(arg1);
12864 
12865         if (timerid < 0) {
12866             ret = timerid;
12867         } else if (!arg2) {
12868             ret = -TARGET_EFAULT;
12869         } else {
12870             timer_t htimer = g_posix_timers[timerid];
12871             struct itimerspec hspec;
12872             ret = get_errno(timer_gettime(htimer, &hspec));
12873 
12874             if (host_to_target_itimerspec(arg2, &hspec)) {
12875                 ret = -TARGET_EFAULT;
12876             }
12877         }
12878         return ret;
12879     }
12880 #endif
12881 
12882 #ifdef TARGET_NR_timer_gettime64
12883     case TARGET_NR_timer_gettime64:
12884     {
12885         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12886         target_timer_t timerid = get_timer_id(arg1);
12887 
12888         if (timerid < 0) {
12889             ret = timerid;
12890         } else if (!arg2) {
12891             ret = -TARGET_EFAULT;
12892         } else {
12893             timer_t htimer = g_posix_timers[timerid];
12894             struct itimerspec hspec;
12895             ret = get_errno(timer_gettime(htimer, &hspec));
12896 
12897             if (host_to_target_itimerspec64(arg2, &hspec)) {
12898                 ret = -TARGET_EFAULT;
12899             }
12900         }
12901         return ret;
12902     }
12903 #endif
12904 
12905 #ifdef TARGET_NR_timer_getoverrun
12906     case TARGET_NR_timer_getoverrun:
12907     {
12908         /* args: timer_t timerid */
12909         target_timer_t timerid = get_timer_id(arg1);
12910 
12911         if (timerid < 0) {
12912             ret = timerid;
12913         } else {
12914             timer_t htimer = g_posix_timers[timerid];
12915             ret = get_errno(timer_getoverrun(htimer));
12916         }
12917         return ret;
12918     }
12919 #endif
12920 
12921 #ifdef TARGET_NR_timer_delete
12922     case TARGET_NR_timer_delete:
12923     {
12924         /* args: timer_t timerid */
12925         target_timer_t timerid = get_timer_id(arg1);
12926 
12927         if (timerid < 0) {
12928             ret = timerid;
12929         } else {
12930             timer_t htimer = g_posix_timers[timerid];
12931             ret = get_errno(timer_delete(htimer));
12932             g_posix_timers[timerid] = 0;
12933         }
12934         return ret;
12935     }
12936 #endif
12937 
12938 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12939     case TARGET_NR_timerfd_create:
12940         return get_errno(timerfd_create(arg1,
12941                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12942 #endif
12943 
12944 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12945     case TARGET_NR_timerfd_gettime:
12946         {
12947             struct itimerspec its_curr;
12948 
12949             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12950 
12951             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12952                 return -TARGET_EFAULT;
12953             }
12954         }
12955         return ret;
12956 #endif
12957 
12958 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12959     case TARGET_NR_timerfd_gettime64:
12960         {
12961             struct itimerspec its_curr;
12962 
12963             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12964 
12965             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12966                 return -TARGET_EFAULT;
12967             }
12968         }
12969         return ret;
12970 #endif
12971 
12972 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12973     case TARGET_NR_timerfd_settime:
12974         {
12975             struct itimerspec its_new, its_old, *p_new;
12976 
12977             if (arg3) {
12978                 if (target_to_host_itimerspec(&its_new, arg3)) {
12979                     return -TARGET_EFAULT;
12980                 }
12981                 p_new = &its_new;
12982             } else {
12983                 p_new = NULL;
12984             }
12985 
12986             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12987 
12988             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12989                 return -TARGET_EFAULT;
12990             }
12991         }
12992         return ret;
12993 #endif
12994 
12995 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12996     case TARGET_NR_timerfd_settime64:
12997         {
12998             struct itimerspec its_new, its_old, *p_new;
12999 
13000             if (arg3) {
13001                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13002                     return -TARGET_EFAULT;
13003                 }
13004                 p_new = &its_new;
13005             } else {
13006                 p_new = NULL;
13007             }
13008 
13009             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13010 
13011             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13012                 return -TARGET_EFAULT;
13013             }
13014         }
13015         return ret;
13016 #endif
13017 
13018 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13019     case TARGET_NR_ioprio_get:
13020         return get_errno(ioprio_get(arg1, arg2));
13021 #endif
13022 
13023 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13024     case TARGET_NR_ioprio_set:
13025         return get_errno(ioprio_set(arg1, arg2, arg3));
13026 #endif
13027 
13028 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13029     case TARGET_NR_setns:
13030         return get_errno(setns(arg1, arg2));
13031 #endif
13032 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13033     case TARGET_NR_unshare:
13034         return get_errno(unshare(arg1));
13035 #endif
13036 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13037     case TARGET_NR_kcmp:
13038         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13039 #endif
13040 #ifdef TARGET_NR_swapcontext
13041     case TARGET_NR_swapcontext:
13042         /* PowerPC specific.  */
13043         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13044 #endif
13045 #ifdef TARGET_NR_memfd_create
13046     case TARGET_NR_memfd_create:
13047         p = lock_user_string(arg1);
13048         if (!p) {
13049             return -TARGET_EFAULT;
13050         }
13051         ret = get_errno(memfd_create(p, arg2));
13052         fd_trans_unregister(ret);
13053         unlock_user(p, arg1, 0);
13054         return ret;
13055 #endif
13056 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13057     case TARGET_NR_membarrier:
13058         return get_errno(membarrier(arg1, arg2));
13059 #endif
13060 
13061 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13062     case TARGET_NR_copy_file_range:
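              /*
               * As with splice, the optional in/out offsets are read from guest
               * memory up front and written back only if some bytes were copied.
               */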
13063         {
13064             loff_t inoff, outoff;
13065             loff_t *pinoff = NULL, *poutoff = NULL;
13066 
13067             if (arg2) {
13068                 if (get_user_u64(inoff, arg2)) {
13069                     return -TARGET_EFAULT;
13070                 }
13071                 pinoff = &inoff;
13072             }
13073             if (arg4) {
13074                 if (get_user_u64(outoff, arg4)) {
13075                     return -TARGET_EFAULT;
13076                 }
13077                 poutoff = &outoff;
13078             }
13079             /* Do not sign-extend the count parameter. */
13080             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13081                                                  (abi_ulong)arg5, arg6));
13082             if (!is_error(ret) && ret > 0) {
13083                 if (arg2) {
13084                     if (put_user_u64(inoff, arg2)) {
13085                         return -TARGET_EFAULT;
13086                     }
13087                 }
13088                 if (arg4) {
13089                     if (put_user_u64(outoff, arg4)) {
13090                         return -TARGET_EFAULT;
13091                     }
13092                 }
13093             }
13094         }
13095         return ret;
13096 #endif
13097 
13098 #if defined(TARGET_NR_pivot_root)
13099     case TARGET_NR_pivot_root:
13100         {
13101             void *p2;
13102             p = lock_user_string(arg1); /* new_root */
13103             p2 = lock_user_string(arg2); /* put_old */
13104             if (!p || !p2) {
13105                 ret = -TARGET_EFAULT;
13106             } else {
13107                 ret = get_errno(pivot_root(p, p2));
13108             }
13109             unlock_user(p2, arg2, 0);
13110             unlock_user(p, arg1, 0);
13111         }
13112         return ret;
13113 #endif
13114 
13115     default:
13116         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13117         return -TARGET_ENOSYS;
13118     }
13119     return ret;
13120 }
13121 
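      /*
       * Entry point for guest syscalls: wraps do_syscall1() (the big switch
       * above) with optional strace-style logging and the syscall record hooks.
       */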
13122 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13123                     abi_long arg2, abi_long arg3, abi_long arg4,
13124                     abi_long arg5, abi_long arg6, abi_long arg7,
13125                     abi_long arg8)
13126 {
13127     CPUState *cpu = env_cpu(cpu_env);
13128     abi_long ret;
13129 
13130 #ifdef DEBUG_ERESTARTSYS
13131     /* Debug-only code for exercising the syscall-restart code paths
13132      * in the per-architecture cpu main loops: restart every syscall
13133      * the guest makes once before letting it through.
13134      */
13135     {
13136         static bool flag;
13137         flag = !flag;
13138         if (flag) {
13139             return -TARGET_ERESTARTSYS;
13140         }
13141     }
13142 #endif
13143 
13144     record_syscall_start(cpu, num, arg1,
13145                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13146 
13147     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13148         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13149     }
13150 
13151     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13152                       arg5, arg6, arg7, arg8);
13153 
13154     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13155         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13156                           arg3, arg4, arg5, arg6);
13157     }
13158 
13159     record_syscall_return(cpu, num, ret);
13160     return ret;
13161 }
13162