xref: /openbmc/qemu/linux-user/syscall.c (revision 3b249d26)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "qapi/error.h"
140 #include "fd-trans.h"
141 #include "tcg/tcg.h"
142 
143 #ifndef CLONE_IO
144 #define CLONE_IO                0x80000000      /* Clone io context */
145 #endif
146 
147 /* We can't directly call the host clone syscall, because this will
148  * badly confuse libc (breaking mutexes, for example). So we must
149  * divide clone flags into:
150  *  * flag combinations that look like pthread_create()
151  *  * flag combinations that look like fork()
152  *  * flags we can implement within QEMU itself
153  *  * flags we can't support and will return an error for
154  */
155 /* For thread creation, all these flags must be present; for
156  * fork, none must be present.
157  */
158 #define CLONE_THREAD_FLAGS                              \
159     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
160      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
161 
162 /* These flags are ignored:
163  * CLONE_DETACHED is now ignored by the kernel;
164  * CLONE_IO is just an optimisation hint to the I/O scheduler
165  */
166 #define CLONE_IGNORED_FLAGS                     \
167     (CLONE_DETACHED | CLONE_IO)
168 
169 /* Flags for fork which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_FORK_FLAGS               \
171     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
172      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
173 
174 /* Flags for thread creation which we can implement within QEMU itself */
175 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
176     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
177      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
178 
179 #define CLONE_INVALID_FORK_FLAGS                                        \
180     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
181 
182 #define CLONE_INVALID_THREAD_FLAGS                                      \
183     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
184        CLONE_IGNORED_FLAGS))
185 
186 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
187  * have almost all been allocated. We cannot support any of
188  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
189  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
190  * The checks against the invalid thread masks above will catch these.
191  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
192  */
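
/*
 * For example (typical flag combinations; the exact values depend on the
 * guest libc): pthread_create() usually passes
 *   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *   CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
 * which is all of CLONE_THREAD_FLAGS plus only optional/ignored bits, so
 * (flags & CLONE_INVALID_THREAD_FLAGS) == 0 and it is treated as thread
 * creation.  A typical fork() passes SIGCHLD (within CSIGNAL) plus at most
 * CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID, so
 * (flags & CLONE_INVALID_FORK_FLAGS) == 0 and it is treated as a fork.
 */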
193 
194 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
195  * once. This exercises the codepaths for restart.
196  */
197 //#define DEBUG_ERESTARTSYS
198 
199 //#include <linux/msdos_fs.h>
200 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
201 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
202 
203 #undef _syscall0
204 #undef _syscall1
205 #undef _syscall2
206 #undef _syscall3
207 #undef _syscall4
208 #undef _syscall5
209 #undef _syscall6
210 
211 #define _syscall0(type,name)		\
212 static type name (void)			\
213 {					\
214 	return syscall(__NR_##name);	\
215 }
216 
217 #define _syscall1(type,name,type1,arg1)		\
218 static type name (type1 arg1)			\
219 {						\
220 	return syscall(__NR_##name, arg1);	\
221 }
222 
223 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
224 static type name (type1 arg1,type2 arg2)		\
225 {							\
226 	return syscall(__NR_##name, arg1, arg2);	\
227 }
228 
229 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3)		\
231 {								\
232 	return syscall(__NR_##name, arg1, arg2, arg3);		\
233 }
234 
235 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
236 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
237 {										\
238 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
239 }
240 
241 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
242 		  type5,arg5)							\
243 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
244 {										\
245 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
246 }
247 
248 
249 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
250 		  type5,arg5,type6,arg6)					\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
252                   type6 arg6)							\
253 {										\
254 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
255 }
256 
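/*
 * For example, the later declaration
 *   _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
 * expands to a static wrapper
 *   static int sys_getcwd1(char *buf, size_t size)
 *   { return syscall(__NR_sys_getcwd1, buf, size); }
 * where __NR_sys_getcwd1 is #defined just below to the host __NR_getcwd.
 */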
257 
258 #define __NR_sys_uname __NR_uname
259 #define __NR_sys_getcwd1 __NR_getcwd
260 #define __NR_sys_getdents __NR_getdents
261 #define __NR_sys_getdents64 __NR_getdents64
262 #define __NR_sys_getpriority __NR_getpriority
263 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
264 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
265 #define __NR_sys_syslog __NR_syslog
266 #if defined(__NR_futex)
267 # define __NR_sys_futex __NR_futex
268 #endif
269 #if defined(__NR_futex_time64)
270 # define __NR_sys_futex_time64 __NR_futex_time64
271 #endif
272 #define __NR_sys_inotify_init __NR_inotify_init
273 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
274 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we implement getdents using the host getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
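/*
 * For example, a 64-bit guest on a 32-bit host (HOST_LONG_BITS <
 * TARGET_ABI_BITS) leaves EMULATE_GETDENTS_WITH_GETDENTS undefined, so
 * TARGET_NR_getdents is implemented below via sys_getdents64(); otherwise
 * the host getdents (when the host provides it) is used directly.
 */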
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 #define __NR_sys_getcpu __NR_getcpu
341 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
342 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
343           void *, arg);
344 _syscall2(int, capget, struct __user_cap_header_struct *, header,
345           struct __user_cap_data_struct *, data);
346 _syscall2(int, capset, struct __user_cap_header_struct *, header,
347           struct __user_cap_data_struct *, data);
348 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
349 _syscall2(int, ioprio_get, int, which, int, who)
350 #endif
351 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
352 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
353 #endif
354 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
355 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
356 #endif
357 
358 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
359 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
360           unsigned long, idx1, unsigned long, idx2)
361 #endif
362 
363 /*
364  * It is assumed that struct statx is architecture independent.
365  */
366 #if defined(TARGET_NR_statx) && defined(__NR_statx)
367 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
368           unsigned int, mask, struct target_statx *, statxbuf)
369 #endif
370 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
371 _syscall2(int, membarrier, int, cmd, int, flags)
372 #endif
373 
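/*
 * Each entry below is { target mask, target bits, host mask, host bits }:
 * guest open()/fcntl() flag bits matching the target mask/bits pair are
 * translated to the corresponding host bits by the generic bitmask
 * translation helpers, and back again in the other direction.
 */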
374 static const bitmask_transtbl fcntl_flags_tbl[] = {
375   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
376   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
377   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
378   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
379   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
380   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
381   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
382   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
383   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
384   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
385   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
386   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
387   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
388 #if defined(O_DIRECT)
389   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
390 #endif
391 #if defined(O_NOATIME)
392   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
393 #endif
394 #if defined(O_CLOEXEC)
395   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
396 #endif
397 #if defined(O_PATH)
398   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
399 #endif
400 #if defined(O_TMPFILE)
401   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
402 #endif
403   /* Don't terminate the list prematurely on 64-bit host+guest.  */
404 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
405   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
406 #endif
407   { 0, 0, 0, 0 }
408 };
409 
410 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
411 
412 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
413 #if defined(__NR_utimensat)
414 #define __NR_sys_utimensat __NR_utimensat
415 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
416           const struct timespec *,tsp,int,flags)
417 #else
418 static int sys_utimensat(int dirfd, const char *pathname,
419                          const struct timespec times[2], int flags)
420 {
421     errno = ENOSYS;
422     return -1;
423 }
424 #endif
425 #endif /* TARGET_NR_utimensat */
426 
427 #ifdef TARGET_NR_renameat2
428 #if defined(__NR_renameat2)
429 #define __NR_sys_renameat2 __NR_renameat2
430 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
431           const char *, new, unsigned int, flags)
432 #else
433 static int sys_renameat2(int oldfd, const char *old,
434                          int newfd, const char *new, int flags)
435 {
436     if (flags == 0) {
437         return renameat(oldfd, old, newfd, new);
438     }
439     errno = ENOSYS;
440     return -1;
441 }
442 #endif
443 #endif /* TARGET_NR_renameat2 */
444 
445 #ifdef CONFIG_INOTIFY
446 #include <sys/inotify.h>
447 
448 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
449 static int sys_inotify_init(void)
450 {
451   return (inotify_init());
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
455 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
456 {
457   return (inotify_add_watch(fd, pathname, mask));
458 }
459 #endif
460 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
461 static int sys_inotify_rm_watch(int fd, int32_t wd)
462 {
463   return (inotify_rm_watch(fd, wd));
464 }
465 #endif
466 #ifdef CONFIG_INOTIFY1
467 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
468 static int sys_inotify_init1(int flags)
469 {
470   return (inotify_init1(flags));
471 }
472 #endif
473 #endif
474 #else
475 /* Userspace can usually survive at runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY  */
481 
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
485 #endif
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not match the one used by the underlying syscall */
488 struct host_rlimit64 {
489     uint64_t rlim_cur;
490     uint64_t rlim_max;
491 };
492 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
493           const struct host_rlimit64 *, new_limit,
494           struct host_rlimit64 *, old_limit)
495 #endif
496 
497 
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers[32] = { 0, } ;
501 
502 static inline int next_free_host_timer(void)
503 {
504     int k ;
505     /* FIXME: Does finding the next free slot require a lock? */
506     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
507         if (g_posix_timers[k] == 0) {
508             g_posix_timers[k] = (timer_t) 1;
509             return k;
510         }
511     }
512     return -1;
513 }
514 #endif
515 
516 static inline int host_to_target_errno(int host_errno)
517 {
518     switch (host_errno) {
519 #define E(X)  case X: return TARGET_##X;
520 #include "errnos.c.inc"
521 #undef E
522     default:
523         return host_errno;
524     }
525 }
526 
527 static inline int target_to_host_errno(int target_errno)
528 {
529     switch (target_errno) {
530 #define E(X)  case TARGET_##X: return X;
531 #include "errnos.c.inc"
532 #undef E
533     default:
534         return target_errno;
535     }
536 }
537 
538 static inline abi_long get_errno(abi_long ret)
539 {
540     if (ret == -1)
541         return -host_to_target_errno(errno);
542     else
543         return ret;
544 }
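
/*
 * For example, if a host call fails with errno == ENOENT, get_errno(-1)
 * yields -TARGET_ENOENT; non-negative results pass through unchanged.
 * This is why syscall results are carried as negative target errnos
 * throughout this file.
 */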
545 
546 const char *target_strerror(int err)
547 {
548     if (err == TARGET_ERESTARTSYS) {
549         return "To be restarted";
550     }
551     if (err == TARGET_QEMU_ESIGRETURN) {
552         return "Successful exit from sigreturn";
553     }
554 
555     return strerror(target_to_host_errno(err));
556 }
557 
558 #define safe_syscall0(type, name) \
559 static type safe_##name(void) \
560 { \
561     return safe_syscall(__NR_##name); \
562 }
563 
564 #define safe_syscall1(type, name, type1, arg1) \
565 static type safe_##name(type1 arg1) \
566 { \
567     return safe_syscall(__NR_##name, arg1); \
568 }
569 
570 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
571 static type safe_##name(type1 arg1, type2 arg2) \
572 { \
573     return safe_syscall(__NR_##name, arg1, arg2); \
574 }
575 
576 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
577 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
578 { \
579     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
580 }
581 
582 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
583     type4, arg4) \
584 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
585 { \
586     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
587 }
588 
589 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
590     type4, arg4, type5, arg5) \
591 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
592     type5 arg5) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
595 }
596 
597 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
598     type4, arg4, type5, arg5, type6, arg6) \
599 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
600     type5 arg5, type6 arg6) \
601 { \
602     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
603 }
604 
605 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
606 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
607 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
608               int, flags, mode_t, mode)
609 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
610 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
611               struct rusage *, rusage)
612 #endif
613 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
614               int, options, struct rusage *, rusage)
615 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
616 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
617     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
618 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
619               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
620 #endif
621 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
622 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
623               struct timespec *, tsp, const sigset_t *, sigmask,
624               size_t, sigsetsize)
625 #endif
626 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
627               int, maxevents, int, timeout, const sigset_t *, sigmask,
628               size_t, sigsetsize)
629 #if defined(__NR_futex)
630 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
631               const struct timespec *,timeout,int *,uaddr2,int,val3)
632 #endif
633 #if defined(__NR_futex_time64)
634 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
635               const struct timespec *,timeout,int *,uaddr2,int,val3)
636 #endif
637 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
638 safe_syscall2(int, kill, pid_t, pid, int, sig)
639 safe_syscall2(int, tkill, int, tid, int, sig)
640 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
641 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
643 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
644               unsigned long, pos_l, unsigned long, pos_h)
645 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
646               unsigned long, pos_l, unsigned long, pos_h)
647 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
648               socklen_t, addrlen)
649 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
650               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
651 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
652               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
653 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
654 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
655 safe_syscall2(int, flock, int, fd, int, operation)
656 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
657 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
658               const struct timespec *, uts, size_t, sigsetsize)
659 #endif
660 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
661               int, flags)
662 #if defined(TARGET_NR_nanosleep)
663 safe_syscall2(int, nanosleep, const struct timespec *, req,
664               struct timespec *, rem)
665 #endif
666 #if defined(TARGET_NR_clock_nanosleep) || \
667     defined(TARGET_NR_clock_nanosleep_time64)
668 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
669               const struct timespec *, req, struct timespec *, rem)
670 #endif
671 #ifdef __NR_ipc
672 #ifdef __s390x__
673 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
674               void *, ptr)
675 #else
676 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
677               void *, ptr, long, fifth)
678 #endif
679 #endif
680 #ifdef __NR_msgsnd
681 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
682               int, flags)
683 #endif
684 #ifdef __NR_msgrcv
685 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
686               long, msgtype, int, flags)
687 #endif
688 #ifdef __NR_semtimedop
689 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
690               unsigned, nsops, const struct timespec *, timeout)
691 #endif
692 #if defined(TARGET_NR_mq_timedsend) || \
693     defined(TARGET_NR_mq_timedsend_time64)
694 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
695               size_t, len, unsigned, prio, const struct timespec *, timeout)
696 #endif
697 #if defined(TARGET_NR_mq_timedreceive) || \
698     defined(TARGET_NR_mq_timedreceive_time64)
699 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
700               size_t, len, unsigned *, prio, const struct timespec *, timeout)
701 #endif
702 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
703 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
704               int, outfd, loff_t *, poutoff, size_t, length,
705               unsigned int, flags)
706 #endif
707 
708 /* We do ioctl like this rather than via safe_syscall3 to preserve the
709  * "third argument might be integer or pointer or not present" behaviour of
710  * the libc function.
711  */
712 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
713 /* Similarly for fcntl. Note that callers must always:
714  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
715  *  - use the flock64 struct rather than unsuffixed flock
716  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
717  */
718 #ifdef __NR_fcntl64
719 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
720 #else
721 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
722 #endif
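
/*
 * A minimal sketch of the rule above (assumed usage, not a quote of any
 * caller below):
 *
 *   struct flock64 fl64;
 *   ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * i.e. always the 64-bit command constant plus struct flock64, so the same
 * code uses 64-bit offsets on both 32-bit and 64-bit hosts.
 */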
723 
724 static inline int host_to_target_sock_type(int host_type)
725 {
726     int target_type;
727 
728     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
729     case SOCK_DGRAM:
730         target_type = TARGET_SOCK_DGRAM;
731         break;
732     case SOCK_STREAM:
733         target_type = TARGET_SOCK_STREAM;
734         break;
735     default:
736         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
737         break;
738     }
739 
740 #if defined(SOCK_CLOEXEC)
741     if (host_type & SOCK_CLOEXEC) {
742         target_type |= TARGET_SOCK_CLOEXEC;
743     }
744 #endif
745 
746 #if defined(SOCK_NONBLOCK)
747     if (host_type & SOCK_NONBLOCK) {
748         target_type |= TARGET_SOCK_NONBLOCK;
749     }
750 #endif
751 
752     return target_type;
753 }
754 
755 static abi_ulong target_brk;
756 static abi_ulong target_original_brk;
757 static abi_ulong brk_page;
758 
759 void target_set_brk(abi_ulong new_brk)
760 {
761     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
762     brk_page = HOST_PAGE_ALIGN(target_brk);
763 }
764 
765 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
766 #define DEBUGF_BRK(message, args...)
767 
768 /* do_brk() must return target values and target errnos. */
769 abi_long do_brk(abi_ulong new_brk)
770 {
771     abi_long mapped_addr;
772     abi_ulong new_alloc_size;
773 
774     /* brk pointers are always untagged */
775 
776     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
777 
778     if (!new_brk) {
779         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
780         return target_brk;
781     }
782     if (new_brk < target_original_brk) {
783         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
784                    target_brk);
785         return target_brk;
786     }
787 
788     /* If the new brk is less than the highest page reserved to the
789      * target heap allocation, set it and we're almost done...  */
790     if (new_brk <= brk_page) {
791         /* Heap contents are initialized to zero, as for anonymous
792          * mapped pages.  */
793         if (new_brk > target_brk) {
794             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
795         }
796         target_brk = new_brk;
797         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
798         return target_brk;
799     }
800 
801     /* We need to allocate more memory after the brk... Note that
802      * we don't use MAP_FIXED because that will map over the top of
803      * any existing mapping (like the one with the host libc or qemu
804      * itself); instead we treat "mapped but at wrong address" as
805      * a failure and unmap again.
806      */
807     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
808     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
809                                         PROT_READ|PROT_WRITE,
810                                         MAP_ANON|MAP_PRIVATE, 0, 0));
811 
812     if (mapped_addr == brk_page) {
813         /* Heap contents are initialized to zero, as for anonymous
814          * mapped pages.  Technically the new pages are already
815          * initialized to zero since they *are* anonymous mapped
816          * pages, however we have to take care with the contents that
817          * come from the remaining part of the previous page: it may
818      * contain garbage data due to a previous heap usage (grown
819      * then shrunk).  */
820         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
821 
822         target_brk = new_brk;
823         brk_page = HOST_PAGE_ALIGN(target_brk);
824         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
825             target_brk);
826         return target_brk;
827     } else if (mapped_addr != -1) {
828         /* Mapped but at wrong address, meaning there wasn't actually
829          * enough space for this brk.
830          */
831         target_munmap(mapped_addr, new_alloc_size);
832         mapped_addr = -1;
833         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
834     }
835     else {
836         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
837     }
838 
839 #if defined(TARGET_ALPHA)
840     /* We (partially) emulate OSF/1 on Alpha, which requires we
841        return a proper errno, not an unchanged brk value.  */
842     return -TARGET_ENOMEM;
843 #endif
844     /* For everything else, return the previous break. */
845     return target_brk;
846 }
847 
848 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
849     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
850 static inline abi_long copy_from_user_fdset(fd_set *fds,
851                                             abi_ulong target_fds_addr,
852                                             int n)
853 {
854     int i, nw, j, k;
855     abi_ulong b, *target_fds;
856 
857     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
858     if (!(target_fds = lock_user(VERIFY_READ,
859                                  target_fds_addr,
860                                  sizeof(abi_ulong) * nw,
861                                  1)))
862         return -TARGET_EFAULT;
863 
864     FD_ZERO(fds);
865     k = 0;
866     for (i = 0; i < nw; i++) {
867         /* grab the abi_ulong */
868         __get_user(b, &target_fds[i]);
869         for (j = 0; j < TARGET_ABI_BITS; j++) {
870             /* check the bit inside the abi_ulong */
871             if ((b >> j) & 1)
872                 FD_SET(k, fds);
873             k++;
874         }
875     }
876 
877     unlock_user(target_fds, target_fds_addr, 0);
878 
879     return 0;
880 }
881 
882 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
883                                                  abi_ulong target_fds_addr,
884                                                  int n)
885 {
886     if (target_fds_addr) {
887         if (copy_from_user_fdset(fds, target_fds_addr, n))
888             return -TARGET_EFAULT;
889         *fds_ptr = fds;
890     } else {
891         *fds_ptr = NULL;
892     }
893     return 0;
894 }
895 
896 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
897                                           const fd_set *fds,
898                                           int n)
899 {
900     int i, nw, j, k;
901     abi_long v;
902     abi_ulong *target_fds;
903 
904     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
905     if (!(target_fds = lock_user(VERIFY_WRITE,
906                                  target_fds_addr,
907                                  sizeof(abi_ulong) * nw,
908                                  0)))
909         return -TARGET_EFAULT;
910 
911     k = 0;
912     for (i = 0; i < nw; i++) {
913         v = 0;
914         for (j = 0; j < TARGET_ABI_BITS; j++) {
915             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
916             k++;
917         }
918         __put_user(v, &target_fds[i]);
919     }
920 
921     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
922 
923     return 0;
924 }
925 #endif
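
/*
 * Worked example for the fd_set helpers above: with TARGET_ABI_BITS == 64,
 * guest fd 65 is bit 1 of target_fds[1] (65 == 1 * 64 + 1); with
 * TARGET_ABI_BITS == 32 it is bit 1 of target_fds[2].  __get_user() and
 * __put_user() take care of any guest/host endianness difference in each
 * abi_ulong word.
 */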
926 
927 #if defined(__alpha__)
928 #define HOST_HZ 1024
929 #else
930 #define HOST_HZ 100
931 #endif
932 
933 static inline abi_long host_to_target_clock_t(long ticks)
934 {
935 #if HOST_HZ == TARGET_HZ
936     return ticks;
937 #else
938     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
939 #endif
940 }
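
/*
 * Worked example: for an Alpha target (TARGET_HZ is 1024 there) on a
 * typical 100 Hz host, 250 host clock ticks are reported to the guest as
 * 250 * 1024 / 100 == 2560 target clock ticks.
 */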
941 
942 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
943                                              const struct rusage *rusage)
944 {
945     struct target_rusage *target_rusage;
946 
947     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
948         return -TARGET_EFAULT;
949     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
950     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
951     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
952     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
953     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
954     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
955     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
956     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
957     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
958     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
959     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
960     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
961     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
962     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
963     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
964     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
965     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
966     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
967     unlock_user_struct(target_rusage, target_addr, 1);
968 
969     return 0;
970 }
971 
972 #ifdef TARGET_NR_setrlimit
973 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
974 {
975     abi_ulong target_rlim_swap;
976     rlim_t result;
977 
978     target_rlim_swap = tswapal(target_rlim);
979     if (target_rlim_swap == TARGET_RLIM_INFINITY)
980         return RLIM_INFINITY;
981 
982     result = target_rlim_swap;
983     if (target_rlim_swap != (rlim_t)result)
984         return RLIM_INFINITY;
985 
986     return result;
987 }
988 #endif
989 
990 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
991 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
992 {
993     abi_ulong target_rlim_swap;
994     abi_ulong result;
995 
996     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
997         target_rlim_swap = TARGET_RLIM_INFINITY;
998     else
999         target_rlim_swap = rlim;
1000     result = tswapal(target_rlim_swap);
1001 
1002     return result;
1003 }
1004 #endif
1005 
1006 static inline int target_to_host_resource(int code)
1007 {
1008     switch (code) {
1009     case TARGET_RLIMIT_AS:
1010         return RLIMIT_AS;
1011     case TARGET_RLIMIT_CORE:
1012         return RLIMIT_CORE;
1013     case TARGET_RLIMIT_CPU:
1014         return RLIMIT_CPU;
1015     case TARGET_RLIMIT_DATA:
1016         return RLIMIT_DATA;
1017     case TARGET_RLIMIT_FSIZE:
1018         return RLIMIT_FSIZE;
1019     case TARGET_RLIMIT_LOCKS:
1020         return RLIMIT_LOCKS;
1021     case TARGET_RLIMIT_MEMLOCK:
1022         return RLIMIT_MEMLOCK;
1023     case TARGET_RLIMIT_MSGQUEUE:
1024         return RLIMIT_MSGQUEUE;
1025     case TARGET_RLIMIT_NICE:
1026         return RLIMIT_NICE;
1027     case TARGET_RLIMIT_NOFILE:
1028         return RLIMIT_NOFILE;
1029     case TARGET_RLIMIT_NPROC:
1030         return RLIMIT_NPROC;
1031     case TARGET_RLIMIT_RSS:
1032         return RLIMIT_RSS;
1033     case TARGET_RLIMIT_RTPRIO:
1034         return RLIMIT_RTPRIO;
1035     case TARGET_RLIMIT_SIGPENDING:
1036         return RLIMIT_SIGPENDING;
1037     case TARGET_RLIMIT_STACK:
1038         return RLIMIT_STACK;
1039     default:
1040         return code;
1041     }
1042 }
1043 
1044 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1045                                               abi_ulong target_tv_addr)
1046 {
1047     struct target_timeval *target_tv;
1048 
1049     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1050         return -TARGET_EFAULT;
1051     }
1052 
1053     __get_user(tv->tv_sec, &target_tv->tv_sec);
1054     __get_user(tv->tv_usec, &target_tv->tv_usec);
1055 
1056     unlock_user_struct(target_tv, target_tv_addr, 0);
1057 
1058     return 0;
1059 }
1060 
1061 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1062                                             const struct timeval *tv)
1063 {
1064     struct target_timeval *target_tv;
1065 
1066     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1067         return -TARGET_EFAULT;
1068     }
1069 
1070     __put_user(tv->tv_sec, &target_tv->tv_sec);
1071     __put_user(tv->tv_usec, &target_tv->tv_usec);
1072 
1073     unlock_user_struct(target_tv, target_tv_addr, 1);
1074 
1075     return 0;
1076 }
1077 
1078 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1079 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1080                                                 abi_ulong target_tv_addr)
1081 {
1082     struct target__kernel_sock_timeval *target_tv;
1083 
1084     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1085         return -TARGET_EFAULT;
1086     }
1087 
1088     __get_user(tv->tv_sec, &target_tv->tv_sec);
1089     __get_user(tv->tv_usec, &target_tv->tv_usec);
1090 
1091     unlock_user_struct(target_tv, target_tv_addr, 0);
1092 
1093     return 0;
1094 }
1095 #endif
1096 
1097 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1098                                               const struct timeval *tv)
1099 {
1100     struct target__kernel_sock_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __put_user(tv->tv_sec, &target_tv->tv_sec);
1107     __put_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 1);
1110 
1111     return 0;
1112 }
1113 
1114 #if defined(TARGET_NR_futex) || \
1115     defined(TARGET_NR_rt_sigtimedwait) || \
1116     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1117     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1118     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1119     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1120     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1121     defined(TARGET_NR_timer_settime) || \
1122     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1123 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1124                                                abi_ulong target_addr)
1125 {
1126     struct target_timespec *target_ts;
1127 
1128     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1129         return -TARGET_EFAULT;
1130     }
1131     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1132     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1133     unlock_user_struct(target_ts, target_addr, 0);
1134     return 0;
1135 }
1136 #endif
1137 
1138 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1139     defined(TARGET_NR_timer_settime64) || \
1140     defined(TARGET_NR_mq_timedsend_time64) || \
1141     defined(TARGET_NR_mq_timedreceive_time64) || \
1142     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1143     defined(TARGET_NR_clock_nanosleep_time64) || \
1144     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1145     defined(TARGET_NR_utimensat) || \
1146     defined(TARGET_NR_utimensat_time64) || \
1147     defined(TARGET_NR_semtimedop_time64) || \
1148     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1149 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1150                                                  abi_ulong target_addr)
1151 {
1152     struct target__kernel_timespec *target_ts;
1153 
1154     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1155         return -TARGET_EFAULT;
1156     }
1157     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1158     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1159     /* In 32-bit mode, this drops the padding */
1160     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1161     unlock_user_struct(target_ts, target_addr, 0);
1162     return 0;
1163 }
1164 #endif
1165 
1166 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1167                                                struct timespec *host_ts)
1168 {
1169     struct target_timespec *target_ts;
1170 
1171     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1172         return -TARGET_EFAULT;
1173     }
1174     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1175     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1176     unlock_user_struct(target_ts, target_addr, 1);
1177     return 0;
1178 }
1179 
1180 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1181                                                  struct timespec *host_ts)
1182 {
1183     struct target__kernel_timespec *target_ts;
1184 
1185     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1186         return -TARGET_EFAULT;
1187     }
1188     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1189     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1190     unlock_user_struct(target_ts, target_addr, 1);
1191     return 0;
1192 }
1193 
1194 #if defined(TARGET_NR_gettimeofday)
1195 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1196                                              struct timezone *tz)
1197 {
1198     struct target_timezone *target_tz;
1199 
1200     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1201         return -TARGET_EFAULT;
1202     }
1203 
1204     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1205     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1206 
1207     unlock_user_struct(target_tz, target_tz_addr, 1);
1208 
1209     return 0;
1210 }
1211 #endif
1212 
1213 #if defined(TARGET_NR_settimeofday)
1214 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1215                                                abi_ulong target_tz_addr)
1216 {
1217     struct target_timezone *target_tz;
1218 
1219     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1220         return -TARGET_EFAULT;
1221     }
1222 
1223     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1224     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1225 
1226     unlock_user_struct(target_tz, target_tz_addr, 0);
1227 
1228     return 0;
1229 }
1230 #endif
1231 
1232 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1233 #include <mqueue.h>
1234 
1235 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1236                                               abi_ulong target_mq_attr_addr)
1237 {
1238     struct target_mq_attr *target_mq_attr;
1239 
1240     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1241                           target_mq_attr_addr, 1))
1242         return -TARGET_EFAULT;
1243 
1244     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1245     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1246     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1247     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1248 
1249     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1250 
1251     return 0;
1252 }
1253 
1254 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1255                                             const struct mq_attr *attr)
1256 {
1257     struct target_mq_attr *target_mq_attr;
1258 
1259     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1260                           target_mq_attr_addr, 0))
1261         return -TARGET_EFAULT;
1262 
1263     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1264     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1265     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1266     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1267 
1268     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1269 
1270     return 0;
1271 }
1272 #endif
1273 
1274 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1275 /* do_select() must return target values and target errnos. */
1276 static abi_long do_select(int n,
1277                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1278                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1279 {
1280     fd_set rfds, wfds, efds;
1281     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1282     struct timeval tv;
1283     struct timespec ts, *ts_ptr;
1284     abi_long ret;
1285 
1286     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1287     if (ret) {
1288         return ret;
1289     }
1290     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1291     if (ret) {
1292         return ret;
1293     }
1294     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1295     if (ret) {
1296         return ret;
1297     }
1298 
1299     if (target_tv_addr) {
1300         if (copy_from_user_timeval(&tv, target_tv_addr))
1301             return -TARGET_EFAULT;
1302         ts.tv_sec = tv.tv_sec;
1303         ts.tv_nsec = tv.tv_usec * 1000;
1304         ts_ptr = &ts;
1305     } else {
1306         ts_ptr = NULL;
1307     }
1308 
1309     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1310                                   ts_ptr, NULL));
1311 
1312     if (!is_error(ret)) {
1313         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1314             return -TARGET_EFAULT;
1315         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1316             return -TARGET_EFAULT;
1317         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1318             return -TARGET_EFAULT;
1319 
1320         if (target_tv_addr) {
1321             tv.tv_sec = ts.tv_sec;
1322             tv.tv_usec = ts.tv_nsec / 1000;
1323             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1324                 return -TARGET_EFAULT;
1325             }
1326         }
1327     }
1328 
1329     return ret;
1330 }
1331 
1332 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1333 static abi_long do_old_select(abi_ulong arg1)
1334 {
1335     struct target_sel_arg_struct *sel;
1336     abi_ulong inp, outp, exp, tvp;
1337     long nsel;
1338 
1339     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1340         return -TARGET_EFAULT;
1341     }
1342 
1343     nsel = tswapal(sel->n);
1344     inp = tswapal(sel->inp);
1345     outp = tswapal(sel->outp);
1346     exp = tswapal(sel->exp);
1347     tvp = tswapal(sel->tvp);
1348 
1349     unlock_user_struct(sel, arg1, 0);
1350 
1351     return do_select(nsel, inp, outp, exp, tvp);
1352 }
1353 #endif
1354 #endif
1355 
1356 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1357 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1358                             abi_long arg4, abi_long arg5, abi_long arg6,
1359                             bool time64)
1360 {
1361     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1362     fd_set rfds, wfds, efds;
1363     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1364     struct timespec ts, *ts_ptr;
1365     abi_long ret;
1366 
1367     /*
1368      * The 6th arg is actually two args smashed together,
1369      * so we cannot use the C library.
1370      */
1371     sigset_t set;
1372     struct {
1373         sigset_t *set;
1374         size_t size;
1375     } sig, *sig_ptr;
1376 
1377     abi_ulong arg_sigset, arg_sigsize, *arg7;
1378     target_sigset_t *target_sigset;
1379 
1380     n = arg1;
1381     rfd_addr = arg2;
1382     wfd_addr = arg3;
1383     efd_addr = arg4;
1384     ts_addr = arg5;
1385 
1386     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1387     if (ret) {
1388         return ret;
1389     }
1390     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1391     if (ret) {
1392         return ret;
1393     }
1394     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1395     if (ret) {
1396         return ret;
1397     }
1398 
1399     /*
1400      * This takes a timespec, and not a timeval, so we cannot
1401      * use the do_select() helper ...
1402      */
1403     if (ts_addr) {
1404         if (time64) {
1405             if (target_to_host_timespec64(&ts, ts_addr)) {
1406                 return -TARGET_EFAULT;
1407             }
1408         } else {
1409             if (target_to_host_timespec(&ts, ts_addr)) {
1410                 return -TARGET_EFAULT;
1411             }
1412         }
1413         ts_ptr = &ts;
1414     } else {
1415         ts_ptr = NULL;
1416     }
1417 
1418     /* Extract the two packed args for the sigset */
1419     if (arg6) {
1420         sig_ptr = &sig;
1421         sig.size = SIGSET_T_SIZE;
1422 
1423         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1424         if (!arg7) {
1425             return -TARGET_EFAULT;
1426         }
1427         arg_sigset = tswapal(arg7[0]);
1428         arg_sigsize = tswapal(arg7[1]);
1429         unlock_user(arg7, arg6, 0);
1430 
1431         if (arg_sigset) {
1432             sig.set = &set;
1433             if (arg_sigsize != sizeof(*target_sigset)) {
1434                 /* Like the kernel, we enforce correct size sigsets */
1435                 return -TARGET_EINVAL;
1436             }
1437             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1438                                       sizeof(*target_sigset), 1);
1439             if (!target_sigset) {
1440                 return -TARGET_EFAULT;
1441             }
1442             target_to_host_sigset(&set, target_sigset);
1443             unlock_user(target_sigset, arg_sigset, 0);
1444         } else {
1445             sig.set = NULL;
1446         }
1447     } else {
1448         sig_ptr = NULL;
1449     }
1450 
1451     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1452                                   ts_ptr, sig_ptr));
1453 
1454     if (!is_error(ret)) {
1455         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1462             return -TARGET_EFAULT;
1463         }
1464         if (time64) {
1465             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1466                 return -TARGET_EFAULT;
1467             }
1468         } else {
1469             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1470                 return -TARGET_EFAULT;
1471             }
1472         }
1473     }
1474     return ret;
1475 }
1476 #endif
1477 
1478 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1479     defined(TARGET_NR_ppoll_time64)
1480 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1481                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1482 {
1483     struct target_pollfd *target_pfd;
1484     unsigned int nfds = arg2;
1485     struct pollfd *pfd;
1486     unsigned int i;
1487     abi_long ret;
1488 
1489     pfd = NULL;
1490     target_pfd = NULL;
1491     if (nfds) {
1492         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1493             return -TARGET_EINVAL;
1494         }
1495         target_pfd = lock_user(VERIFY_WRITE, arg1,
1496                                sizeof(struct target_pollfd) * nfds, 1);
1497         if (!target_pfd) {
1498             return -TARGET_EFAULT;
1499         }
1500 
1501         pfd = alloca(sizeof(struct pollfd) * nfds);
1502         for (i = 0; i < nfds; i++) {
1503             pfd[i].fd = tswap32(target_pfd[i].fd);
1504             pfd[i].events = tswap16(target_pfd[i].events);
1505         }
1506     }
1507     if (ppoll) {
1508         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1509         target_sigset_t *target_set;
1510         sigset_t _set, *set = &_set;
1511 
1512         if (arg3) {
1513             if (time64) {
1514                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1515                     unlock_user(target_pfd, arg1, 0);
1516                     return -TARGET_EFAULT;
1517                 }
1518             } else {
1519                 if (target_to_host_timespec(timeout_ts, arg3)) {
1520                     unlock_user(target_pfd, arg1, 0);
1521                     return -TARGET_EFAULT;
1522                 }
1523             }
1524         } else {
1525             timeout_ts = NULL;
1526         }
1527 
1528         if (arg4) {
1529             if (arg5 != sizeof(target_sigset_t)) {
1530                 unlock_user(target_pfd, arg1, 0);
1531                 return -TARGET_EINVAL;
1532             }
1533 
1534             target_set = lock_user(VERIFY_READ, arg4,
1535                                    sizeof(target_sigset_t), 1);
1536             if (!target_set) {
1537                 unlock_user(target_pfd, arg1, 0);
1538                 return -TARGET_EFAULT;
1539             }
1540             target_to_host_sigset(set, target_set);
1541         } else {
1542             set = NULL;
1543         }
1544 
1545         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1546                                    set, SIGSET_T_SIZE));
1547 
1548         if (!is_error(ret) && arg3) {
1549             if (time64) {
1550                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1551                     return -TARGET_EFAULT;
1552                 }
1553             } else {
1554                 if (host_to_target_timespec(arg3, timeout_ts)) {
1555                     return -TARGET_EFAULT;
1556                 }
1557             }
1558         }
1559         if (arg4) {
1560             unlock_user(target_set, arg4, 0);
1561         }
1562     } else {
1563         struct timespec ts, *pts;
1564 
1565         if (arg3 >= 0) {
1566             /* Convert milliseconds to seconds and nanoseconds. */
1567             ts.tv_sec = arg3 / 1000;
1568             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1569             pts = &ts;
1570         } else {
1571             /* A negative poll() timeout means "infinite". */
1572             pts = NULL;
1573         }
1574         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1575     }
1576 
1577     if (!is_error(ret)) {
1578         for (i = 0; i < nfds; i++) {
1579             target_pfd[i].revents = tswap16(pfd[i].revents);
1580         }
1581     }
1582     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1583     return ret;
1584 }
1585 #endif
1586 
1587 static abi_long do_pipe2(int host_pipe[], int flags)
1588 {
1589 #ifdef CONFIG_PIPE2
1590     return pipe2(host_pipe, flags);
1591 #else
1592     return -ENOSYS;
1593 #endif
1594 }
1595 
1596 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1597                         int flags, int is_pipe2)
1598 {
1599     int host_pipe[2];
1600     abi_long ret;
1601     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1602 
1603     if (is_error(ret))
1604         return get_errno(ret);
1605 
1606     /* Several targets have special calling conventions for the original
1607        pipe syscall, but these were not carried over to the pipe2 syscall.  */
1608     if (!is_pipe2) {
1609 #if defined(TARGET_ALPHA)
1610         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1611         return host_pipe[0];
1612 #elif defined(TARGET_MIPS)
1613         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_SH4)
1616         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1617         return host_pipe[0];
1618 #elif defined(TARGET_SPARC)
1619         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1620         return host_pipe[0];
1621 #endif
1622     }
1623 
1624     if (put_user_s32(host_pipe[0], pipedes)
1625         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1626         return -TARGET_EFAULT;
1627     return get_errno(ret);
1628 }
1629 
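     /*
      * Copy a guest ip_mreq/ip_mreqn multicast request into a host ip_mreqn,
      * byte-swapping the interface index when the longer ip_mreqn form is
      * supplied.
      */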
1630 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1631                                               abi_ulong target_addr,
1632                                               socklen_t len)
1633 {
1634     struct target_ip_mreqn *target_smreqn;
1635 
1636     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1637     if (!target_smreqn)
1638         return -TARGET_EFAULT;
1639     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1640     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1641     if (len == sizeof(struct target_ip_mreqn))
1642         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1643     unlock_user(target_smreqn, target_addr, 0);
1644 
1645     return 0;
1646 }
1647 
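     /*
      * Convert a guest sockaddr into host format.  Uses a per-fd translator
      * when one is registered, fixes up unterminated AF_UNIX sun_path
      * lengths, and byte-swaps the integer fields of AF_NETLINK and
      * AF_PACKET addresses.
      */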
1648 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1649                                                abi_ulong target_addr,
1650                                                socklen_t len)
1651 {
1652     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1653     sa_family_t sa_family;
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (fd_trans_target_to_host_addr(fd)) {
1657         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1658     }
1659 
1660     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_saddr)
1662         return -TARGET_EFAULT;
1663 
1664     sa_family = tswap16(target_saddr->sa_family);
1665 
1666     /* The caller might send an incomplete sun_path; sun_path
1667      * must be terminated by \0 (see the manual page), but
1668      * unfortunately it is quite common to specify the sockaddr_un
1669      * length as "strlen(x->sun_path)" when it should be
1670      * "strlen(...) + 1". We fix that up here if needed.
1671      * The Linux kernel has a similar workaround.
1672      */
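         /* Illustrative example (values are hypothetical): for the path
          * "/tmp/sock" a guest may pass len == offsetof(sun_path) + 9,
          * leaving the terminating '\0' outside the supplied length.  The
          * check below then sees a non-zero byte at cp[len - 1] and a '\0'
          * at cp[len], so it extends len by one before the address is
          * passed on to the host.
          */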
1673 
1674     if (sa_family == AF_UNIX) {
1675         if (len < unix_maxlen && len > 0) {
1676             char *cp = (char*)target_saddr;
1677 
1678             if ( cp[len-1] && !cp[len] )
1679                 len++;
1680         }
1681         if (len > unix_maxlen)
1682             len = unix_maxlen;
1683     }
1684 
1685     memcpy(addr, target_saddr, len);
1686     addr->sa_family = sa_family;
1687     if (sa_family == AF_NETLINK) {
1688         struct sockaddr_nl *nladdr;
1689 
1690         nladdr = (struct sockaddr_nl *)addr;
1691         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1692         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1693     } else if (sa_family == AF_PACKET) {
1694         struct target_sockaddr_ll *lladdr;
1695 
1696         lladdr = (struct target_sockaddr_ll *)addr;
1697         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1698         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699     }
1700     unlock_user(target_saddr, target_addr, 0);
1701 
1702     return 0;
1703 }
1704 
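     /*
      * Copy a host sockaddr back out to guest memory, byte-swapping
      * sa_family and the integer fields of AF_NETLINK, AF_PACKET and
      * AF_INET6 addresses.
      */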
1705 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1706                                                struct sockaddr *addr,
1707                                                socklen_t len)
1708 {
1709     struct target_sockaddr *target_saddr;
1710 
1711     if (len == 0) {
1712         return 0;
1713     }
1714     assert(addr);
1715 
1716     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1717     if (!target_saddr)
1718         return -TARGET_EFAULT;
1719     memcpy(target_saddr, addr, len);
1720     if (len >= offsetof(struct target_sockaddr, sa_family) +
1721         sizeof(target_saddr->sa_family)) {
1722         target_saddr->sa_family = tswap16(addr->sa_family);
1723     }
1724     if (addr->sa_family == AF_NETLINK &&
1725         len >= sizeof(struct target_sockaddr_nl)) {
1726         struct target_sockaddr_nl *target_nl =
1727                (struct target_sockaddr_nl *)target_saddr;
1728         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1729         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1730     } else if (addr->sa_family == AF_PACKET) {
1731         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1732         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1733         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1734     } else if (addr->sa_family == AF_INET6 &&
1735                len >= sizeof(struct target_sockaddr_in6)) {
1736         struct target_sockaddr_in6 *target_in6 =
1737                (struct target_sockaddr_in6 *)target_saddr;
1738         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1739     }
1740     unlock_user(target_saddr, target_addr, len);
1741 
1742     return 0;
1743 }
1744 
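     /*
      * Convert the ancillary data (control messages) of a guest msghdr into
      * host format.  SCM_RIGHTS descriptor arrays and SCM_CREDENTIALS are
      * converted field by field; other payload types are copied through
      * unchanged with a LOG_UNIMP warning.
      */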
1745 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1746                                            struct target_msghdr *target_msgh)
1747 {
1748     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1749     abi_long msg_controllen;
1750     abi_ulong target_cmsg_addr;
1751     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1752     socklen_t space = 0;
1753 
1754     msg_controllen = tswapal(target_msgh->msg_controllen);
1755     if (msg_controllen < sizeof (struct target_cmsghdr))
1756         goto the_end;
1757     target_cmsg_addr = tswapal(target_msgh->msg_control);
1758     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1759     target_cmsg_start = target_cmsg;
1760     if (!target_cmsg)
1761         return -TARGET_EFAULT;
1762 
1763     while (cmsg && target_cmsg) {
1764         void *data = CMSG_DATA(cmsg);
1765         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1766 
1767         int len = tswapal(target_cmsg->cmsg_len)
1768             - sizeof(struct target_cmsghdr);
1769 
1770         space += CMSG_SPACE(len);
1771         if (space > msgh->msg_controllen) {
1772             space -= CMSG_SPACE(len);
1773             /* This is a QEMU bug, since we allocated the payload
1774              * area ourselves (unlike overflow in host-to-target
1775              * conversion, which is just the guest giving us a buffer
1776              * that's too small). It can't happen for the payload types
1777              * we currently support; if it becomes an issue in future
1778              * we would need to improve our allocation strategy to
1779              * something more intelligent than "twice the size of the
1780              * target buffer we're reading from".
1781              */
1782             qemu_log_mask(LOG_UNIMP,
1783                           ("Unsupported ancillary data %d/%d: "
1784                            "unhandled msg size\n"),
1785                           tswap32(target_cmsg->cmsg_level),
1786                           tswap32(target_cmsg->cmsg_type));
1787             break;
1788         }
1789 
1790         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1791             cmsg->cmsg_level = SOL_SOCKET;
1792         } else {
1793             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1794         }
1795         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1796         cmsg->cmsg_len = CMSG_LEN(len);
1797 
1798         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1799             int *fd = (int *)data;
1800             int *target_fd = (int *)target_data;
1801             int i, numfds = len / sizeof(int);
1802 
1803             for (i = 0; i < numfds; i++) {
1804                 __get_user(fd[i], target_fd + i);
1805             }
1806         } else if (cmsg->cmsg_level == SOL_SOCKET
1807                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1808             struct ucred *cred = (struct ucred *)data;
1809             struct target_ucred *target_cred =
1810                 (struct target_ucred *)target_data;
1811 
1812             __get_user(cred->pid, &target_cred->pid);
1813             __get_user(cred->uid, &target_cred->uid);
1814             __get_user(cred->gid, &target_cred->gid);
1815         } else {
1816             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1817                           cmsg->cmsg_level, cmsg->cmsg_type);
1818             memcpy(data, target_data, len);
1819         }
1820 
1821         cmsg = CMSG_NXTHDR(msgh, cmsg);
1822         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1823                                          target_cmsg_start);
1824     }
1825     unlock_user(target_cmsg, target_cmsg_addr, 0);
1826  the_end:
1827     msgh->msg_controllen = space;
1828     return 0;
1829 }
1830 
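     /*
      * Convert host ancillary data back into the guest msghdr.  Payloads
      * whose size differs on the target (e.g. SO_TIMESTAMP's timeval) are
      * resized, and MSG_CTRUNC is reported to the guest when its control
      * buffer is too small.
      */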
1831 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1832                                            struct msghdr *msgh)
1833 {
1834     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1835     abi_long msg_controllen;
1836     abi_ulong target_cmsg_addr;
1837     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1838     socklen_t space = 0;
1839 
1840     msg_controllen = tswapal(target_msgh->msg_controllen);
1841     if (msg_controllen < sizeof (struct target_cmsghdr))
1842         goto the_end;
1843     target_cmsg_addr = tswapal(target_msgh->msg_control);
1844     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1845     target_cmsg_start = target_cmsg;
1846     if (!target_cmsg)
1847         return -TARGET_EFAULT;
1848 
1849     while (cmsg && target_cmsg) {
1850         void *data = CMSG_DATA(cmsg);
1851         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1852 
1853         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1854         int tgt_len, tgt_space;
1855 
1856         /* We never copy a half-header but may copy half-data;
1857          * this is Linux's behaviour in put_cmsg(). Note that
1858          * truncation here is a guest problem (which we report
1859          * to the guest via the CTRUNC bit), unlike truncation
1860          * in target_to_host_cmsg, which is a QEMU bug.
1861          */
1862         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1863             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1864             break;
1865         }
1866 
1867         if (cmsg->cmsg_level == SOL_SOCKET) {
1868             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1869         } else {
1870             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1871         }
1872         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1873 
1874         /* Payload types which need a different size of payload on
1875          * the target must adjust tgt_len here.
1876          */
1877         tgt_len = len;
1878         switch (cmsg->cmsg_level) {
1879         case SOL_SOCKET:
1880             switch (cmsg->cmsg_type) {
1881             case SO_TIMESTAMP:
1882                 tgt_len = sizeof(struct target_timeval);
1883                 break;
1884             default:
1885                 break;
1886             }
1887             break;
1888         default:
1889             break;
1890         }
1891 
1892         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1893             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1894             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1895         }
1896 
1897         /* We must now copy-and-convert len bytes of payload
1898          * into tgt_len bytes of destination space. Bear in mind
1899          * that in both source and destination we may be dealing
1900          * with a truncated value!
1901          */
1902         switch (cmsg->cmsg_level) {
1903         case SOL_SOCKET:
1904             switch (cmsg->cmsg_type) {
1905             case SCM_RIGHTS:
1906             {
1907                 int *fd = (int *)data;
1908                 int *target_fd = (int *)target_data;
1909                 int i, numfds = tgt_len / sizeof(int);
1910 
1911                 for (i = 0; i < numfds; i++) {
1912                     __put_user(fd[i], target_fd + i);
1913                 }
1914                 break;
1915             }
1916             case SO_TIMESTAMP:
1917             {
1918                 struct timeval *tv = (struct timeval *)data;
1919                 struct target_timeval *target_tv =
1920                     (struct target_timeval *)target_data;
1921 
1922                 if (len != sizeof(struct timeval) ||
1923                     tgt_len != sizeof(struct target_timeval)) {
1924                     goto unimplemented;
1925                 }
1926 
1927                 /* copy struct timeval to target */
1928                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1929                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1930                 break;
1931             }
1932             case SCM_CREDENTIALS:
1933             {
1934                 struct ucred *cred = (struct ucred *)data;
1935                 struct target_ucred *target_cred =
1936                     (struct target_ucred *)target_data;
1937 
1938                 __put_user(cred->pid, &target_cred->pid);
1939                 __put_user(cred->uid, &target_cred->uid);
1940                 __put_user(cred->gid, &target_cred->gid);
1941                 break;
1942             }
1943             default:
1944                 goto unimplemented;
1945             }
1946             break;
1947 
1948         case SOL_IP:
1949             switch (cmsg->cmsg_type) {
1950             case IP_TTL:
1951             {
1952                 uint32_t *v = (uint32_t *)data;
1953                 uint32_t *t_int = (uint32_t *)target_data;
1954 
1955                 if (len != sizeof(uint32_t) ||
1956                     tgt_len != sizeof(uint32_t)) {
1957                     goto unimplemented;
1958                 }
1959                 __put_user(*v, t_int);
1960                 break;
1961             }
1962             case IP_RECVERR:
1963             {
1964                 struct errhdr_t {
1965                    struct sock_extended_err ee;
1966                    struct sockaddr_in offender;
1967                 };
1968                 struct errhdr_t *errh = (struct errhdr_t *)data;
1969                 struct errhdr_t *target_errh =
1970                     (struct errhdr_t *)target_data;
1971 
1972                 if (len != sizeof(struct errhdr_t) ||
1973                     tgt_len != sizeof(struct errhdr_t)) {
1974                     goto unimplemented;
1975                 }
1976                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1977                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1978                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1979                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1980                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1981                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1982                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1983                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1984                     (void *) &errh->offender, sizeof(errh->offender));
1985                 break;
1986             }
1987             default:
1988                 goto unimplemented;
1989             }
1990             break;
1991 
1992         case SOL_IPV6:
1993             switch (cmsg->cmsg_type) {
1994             case IPV6_HOPLIMIT:
1995             {
1996                 uint32_t *v = (uint32_t *)data;
1997                 uint32_t *t_int = (uint32_t *)target_data;
1998 
1999                 if (len != sizeof(uint32_t) ||
2000                     tgt_len != sizeof(uint32_t)) {
2001                     goto unimplemented;
2002                 }
2003                 __put_user(*v, t_int);
2004                 break;
2005             }
2006             case IPV6_RECVERR:
2007             {
2008                 struct errhdr6_t {
2009                    struct sock_extended_err ee;
2010                    struct sockaddr_in6 offender;
2011                 };
2012                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2013                 struct errhdr6_t *target_errh =
2014                     (struct errhdr6_t *)target_data;
2015 
2016                 if (len != sizeof(struct errhdr6_t) ||
2017                     tgt_len != sizeof(struct errhdr6_t)) {
2018                     goto unimplemented;
2019                 }
2020                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2021                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2022                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2023                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2024                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2025                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2026                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2027                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2028                     (void *) &errh->offender, sizeof(errh->offender));
2029                 break;
2030             }
2031             default:
2032                 goto unimplemented;
2033             }
2034             break;
2035 
2036         default:
2037         unimplemented:
2038             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2039                           cmsg->cmsg_level, cmsg->cmsg_type);
2040             memcpy(target_data, data, MIN(len, tgt_len));
2041             if (tgt_len > len) {
2042                 memset(target_data + len, 0, tgt_len - len);
2043             }
2044         }
2045 
2046         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2047         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2048         if (msg_controllen < tgt_space) {
2049             tgt_space = msg_controllen;
2050         }
2051         msg_controllen -= tgt_space;
2052         space += tgt_space;
2053         cmsg = CMSG_NXTHDR(msgh, cmsg);
2054         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2055                                          target_cmsg_start);
2056     }
2057     unlock_user(target_cmsg, target_cmsg_addr, space);
2058  the_end:
2059     target_msgh->msg_controllen = tswapal(space);
2060     return 0;
2061 }
2062 
2063 /* do_setsockopt() must return target values and target errnos. */
2064 static abi_long do_setsockopt(int sockfd, int level, int optname,
2065                               abi_ulong optval_addr, socklen_t optlen)
2066 {
2067     abi_long ret;
2068     int val;
2069     struct ip_mreqn *ip_mreq;
2070     struct ip_mreq_source *ip_mreq_source;
2071 
2072     switch(level) {
2073     case SOL_TCP:
2074     case SOL_UDP:
2075         /* TCP and UDP options all take an 'int' value.  */
2076         if (optlen < sizeof(uint32_t))
2077             return -TARGET_EINVAL;
2078 
2079         if (get_user_u32(val, optval_addr))
2080             return -TARGET_EFAULT;
2081         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2082         break;
2083     case SOL_IP:
2084         switch(optname) {
2085         case IP_TOS:
2086         case IP_TTL:
2087         case IP_HDRINCL:
2088         case IP_ROUTER_ALERT:
2089         case IP_RECVOPTS:
2090         case IP_RETOPTS:
2091         case IP_PKTINFO:
2092         case IP_MTU_DISCOVER:
2093         case IP_RECVERR:
2094         case IP_RECVTTL:
2095         case IP_RECVTOS:
2096 #ifdef IP_FREEBIND
2097         case IP_FREEBIND:
2098 #endif
2099         case IP_MULTICAST_TTL:
2100         case IP_MULTICAST_LOOP:
2101             val = 0;
2102             if (optlen >= sizeof(uint32_t)) {
2103                 if (get_user_u32(val, optval_addr))
2104                     return -TARGET_EFAULT;
2105             } else if (optlen >= 1) {
2106                 if (get_user_u8(val, optval_addr))
2107                     return -TARGET_EFAULT;
2108             }
2109             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2110             break;
2111         case IP_ADD_MEMBERSHIP:
2112         case IP_DROP_MEMBERSHIP:
2113             if (optlen < sizeof (struct target_ip_mreq) ||
2114                 optlen > sizeof (struct target_ip_mreqn))
2115                 return -TARGET_EINVAL;
2116 
2117             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2118             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2119             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2120             break;
2121 
2122         case IP_BLOCK_SOURCE:
2123         case IP_UNBLOCK_SOURCE:
2124         case IP_ADD_SOURCE_MEMBERSHIP:
2125         case IP_DROP_SOURCE_MEMBERSHIP:
2126             if (optlen != sizeof (struct target_ip_mreq_source))
2127                 return -TARGET_EINVAL;
2128 
2129             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2130             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2131             unlock_user(ip_mreq_source, optval_addr, 0);
2132             break;
2133 
2134         default:
2135             goto unimplemented;
2136         }
2137         break;
2138     case SOL_IPV6:
2139         switch (optname) {
2140         case IPV6_MTU_DISCOVER:
2141         case IPV6_MTU:
2142         case IPV6_V6ONLY:
2143         case IPV6_RECVPKTINFO:
2144         case IPV6_UNICAST_HOPS:
2145         case IPV6_MULTICAST_HOPS:
2146         case IPV6_MULTICAST_LOOP:
2147         case IPV6_RECVERR:
2148         case IPV6_RECVHOPLIMIT:
2149         case IPV6_2292HOPLIMIT:
2150         case IPV6_CHECKSUM:
2151         case IPV6_ADDRFORM:
2152         case IPV6_2292PKTINFO:
2153         case IPV6_RECVTCLASS:
2154         case IPV6_RECVRTHDR:
2155         case IPV6_2292RTHDR:
2156         case IPV6_RECVHOPOPTS:
2157         case IPV6_2292HOPOPTS:
2158         case IPV6_RECVDSTOPTS:
2159         case IPV6_2292DSTOPTS:
2160         case IPV6_TCLASS:
2161         case IPV6_ADDR_PREFERENCES:
2162 #ifdef IPV6_RECVPATHMTU
2163         case IPV6_RECVPATHMTU:
2164 #endif
2165 #ifdef IPV6_TRANSPARENT
2166         case IPV6_TRANSPARENT:
2167 #endif
2168 #ifdef IPV6_FREEBIND
2169         case IPV6_FREEBIND:
2170 #endif
2171 #ifdef IPV6_RECVORIGDSTADDR
2172         case IPV6_RECVORIGDSTADDR:
2173 #endif
2174             val = 0;
2175             if (optlen < sizeof(uint32_t)) {
2176                 return -TARGET_EINVAL;
2177             }
2178             if (get_user_u32(val, optval_addr)) {
2179                 return -TARGET_EFAULT;
2180             }
2181             ret = get_errno(setsockopt(sockfd, level, optname,
2182                                        &val, sizeof(val)));
2183             break;
2184         case IPV6_PKTINFO:
2185         {
2186             struct in6_pktinfo pki;
2187 
2188             if (optlen < sizeof(pki)) {
2189                 return -TARGET_EINVAL;
2190             }
2191 
2192             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2193                 return -TARGET_EFAULT;
2194             }
2195 
2196             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2197 
2198             ret = get_errno(setsockopt(sockfd, level, optname,
2199                                        &pki, sizeof(pki)));
2200             break;
2201         }
2202         case IPV6_ADD_MEMBERSHIP:
2203         case IPV6_DROP_MEMBERSHIP:
2204         {
2205             struct ipv6_mreq ipv6mreq;
2206 
2207             if (optlen < sizeof(ipv6mreq)) {
2208                 return -TARGET_EINVAL;
2209             }
2210 
2211             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2212                 return -TARGET_EFAULT;
2213             }
2214 
2215             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2216 
2217             ret = get_errno(setsockopt(sockfd, level, optname,
2218                                        &ipv6mreq, sizeof(ipv6mreq)));
2219             break;
2220         }
2221         default:
2222             goto unimplemented;
2223         }
2224         break;
2225     case SOL_ICMPV6:
2226         switch (optname) {
2227         case ICMPV6_FILTER:
2228         {
2229             struct icmp6_filter icmp6f;
2230 
2231             if (optlen > sizeof(icmp6f)) {
2232                 optlen = sizeof(icmp6f);
2233             }
2234 
2235             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2236                 return -TARGET_EFAULT;
2237             }
2238 
2239             for (val = 0; val < 8; val++) {
2240                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2241             }
2242 
2243             ret = get_errno(setsockopt(sockfd, level, optname,
2244                                        &icmp6f, optlen));
2245             break;
2246         }
2247         default:
2248             goto unimplemented;
2249         }
2250         break;
2251     case SOL_RAW:
2252         switch (optname) {
2253         case ICMP_FILTER:
2254         case IPV6_CHECKSUM:
2255             /* These options take a u32 value. */
2256             if (optlen < sizeof(uint32_t)) {
2257                 return -TARGET_EINVAL;
2258             }
2259 
2260             if (get_user_u32(val, optval_addr)) {
2261                 return -TARGET_EFAULT;
2262             }
2263             ret = get_errno(setsockopt(sockfd, level, optname,
2264                                        &val, sizeof(val)));
2265             break;
2266 
2267         default:
2268             goto unimplemented;
2269         }
2270         break;
2271 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2272     case SOL_ALG:
2273         switch (optname) {
2274         case ALG_SET_KEY:
2275         {
2276             char *alg_key = g_malloc(optlen);
2277 
2278             if (!alg_key) {
2279                 return -TARGET_ENOMEM;
2280             }
2281             if (copy_from_user(alg_key, optval_addr, optlen)) {
2282                 g_free(alg_key);
2283                 return -TARGET_EFAULT;
2284             }
2285             ret = get_errno(setsockopt(sockfd, level, optname,
2286                                        alg_key, optlen));
2287             g_free(alg_key);
2288             break;
2289         }
2290         case ALG_SET_AEAD_AUTHSIZE:
2291         {
2292             ret = get_errno(setsockopt(sockfd, level, optname,
2293                                        NULL, optlen));
2294             break;
2295         }
2296         default:
2297             goto unimplemented;
2298         }
2299         break;
2300 #endif
2301     case TARGET_SOL_SOCKET:
2302         switch (optname) {
2303         case TARGET_SO_RCVTIMEO:
2304         {
2305                 struct timeval tv;
2306 
2307                 optname = SO_RCVTIMEO;
2308 
2309 set_timeout:
2310                 if (optlen != sizeof(struct target_timeval)) {
2311                     return -TARGET_EINVAL;
2312                 }
2313 
2314                 if (copy_from_user_timeval(&tv, optval_addr)) {
2315                     return -TARGET_EFAULT;
2316                 }
2317 
2318                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2319                                 &tv, sizeof(tv)));
2320                 return ret;
2321         }
2322         case TARGET_SO_SNDTIMEO:
2323                 optname = SO_SNDTIMEO;
2324                 goto set_timeout;
2325         case TARGET_SO_ATTACH_FILTER:
2326         {
2327                 struct target_sock_fprog *tfprog;
2328                 struct target_sock_filter *tfilter;
2329                 struct sock_fprog fprog;
2330                 struct sock_filter *filter;
2331                 int i;
2332 
2333                 if (optlen != sizeof(*tfprog)) {
2334                     return -TARGET_EINVAL;
2335                 }
2336                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2337                     return -TARGET_EFAULT;
2338                 }
2339                 if (!lock_user_struct(VERIFY_READ, tfilter,
2340                                       tswapal(tfprog->filter), 0)) {
2341                     unlock_user_struct(tfprog, optval_addr, 1);
2342                     return -TARGET_EFAULT;
2343                 }
2344 
2345                 fprog.len = tswap16(tfprog->len);
2346                 filter = g_try_new(struct sock_filter, fprog.len);
2347                 if (filter == NULL) {
2348                     unlock_user_struct(tfilter, tfprog->filter, 1);
2349                     unlock_user_struct(tfprog, optval_addr, 1);
2350                     return -TARGET_ENOMEM;
2351                 }
2352                 for (i = 0; i < fprog.len; i++) {
2353                     filter[i].code = tswap16(tfilter[i].code);
2354                     filter[i].jt = tfilter[i].jt;
2355                     filter[i].jf = tfilter[i].jf;
2356                     filter[i].k = tswap32(tfilter[i].k);
2357                 }
2358                 fprog.filter = filter;
2359 
2360                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2361                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2362                 g_free(filter);
2363 
2364                 unlock_user_struct(tfilter, tfprog->filter, 1);
2365                 unlock_user_struct(tfprog, optval_addr, 1);
2366                 return ret;
2367         }
2368         case TARGET_SO_BINDTODEVICE:
2369         {
2370                 char *dev_ifname, *addr_ifname;
2371 
2372                 if (optlen > IFNAMSIZ - 1) {
2373                     optlen = IFNAMSIZ - 1;
2374                 }
2375                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2376                 if (!dev_ifname) {
2377                     return -TARGET_EFAULT;
2378                 }
2379                 optname = SO_BINDTODEVICE;
2380                 addr_ifname = alloca(IFNAMSIZ);
2381                 memcpy(addr_ifname, dev_ifname, optlen);
2382                 addr_ifname[optlen] = 0;
2383                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2384                                            addr_ifname, optlen));
2385                 unlock_user(dev_ifname, optval_addr, 0);
2386                 return ret;
2387         }
2388         case TARGET_SO_LINGER:
2389         {
2390                 struct linger lg;
2391                 struct target_linger *tlg;
2392 
2393                 if (optlen != sizeof(struct target_linger)) {
2394                     return -TARGET_EINVAL;
2395                 }
2396                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2397                     return -TARGET_EFAULT;
2398                 }
2399                 __get_user(lg.l_onoff, &tlg->l_onoff);
2400                 __get_user(lg.l_linger, &tlg->l_linger);
2401                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2402                                 &lg, sizeof(lg)));
2403                 unlock_user_struct(tlg, optval_addr, 0);
2404                 return ret;
2405         }
2406             /* Options with 'int' argument.  */
2407         case TARGET_SO_DEBUG:
2408                 optname = SO_DEBUG;
2409                 break;
2410         case TARGET_SO_REUSEADDR:
2411                 optname = SO_REUSEADDR;
2412                 break;
2413 #ifdef SO_REUSEPORT
2414         case TARGET_SO_REUSEPORT:
2415                 optname = SO_REUSEPORT;
2416                 break;
2417 #endif
2418         case TARGET_SO_TYPE:
2419                 optname = SO_TYPE;
2420                 break;
2421         case TARGET_SO_ERROR:
2422                 optname = SO_ERROR;
2423                 break;
2424         case TARGET_SO_DONTROUTE:
2425                 optname = SO_DONTROUTE;
2426                 break;
2427         case TARGET_SO_BROADCAST:
2428                 optname = SO_BROADCAST;
2429                 break;
2430         case TARGET_SO_SNDBUF:
2431                 optname = SO_SNDBUF;
2432                 break;
2433         case TARGET_SO_SNDBUFFORCE:
2434                 optname = SO_SNDBUFFORCE;
2435                 break;
2436         case TARGET_SO_RCVBUF:
2437                 optname = SO_RCVBUF;
2438                 break;
2439         case TARGET_SO_RCVBUFFORCE:
2440                 optname = SO_RCVBUFFORCE;
2441                 break;
2442         case TARGET_SO_KEEPALIVE:
2443                 optname = SO_KEEPALIVE;
2444                 break;
2445         case TARGET_SO_OOBINLINE:
2446                 optname = SO_OOBINLINE;
2447                 break;
2448         case TARGET_SO_NO_CHECK:
2449                 optname = SO_NO_CHECK;
2450                 break;
2451         case TARGET_SO_PRIORITY:
2452                 optname = SO_PRIORITY;
2453                 break;
2454 #ifdef SO_BSDCOMPAT
2455         case TARGET_SO_BSDCOMPAT:
2456                 optname = SO_BSDCOMPAT;
2457                 break;
2458 #endif
2459         case TARGET_SO_PASSCRED:
2460                 optname = SO_PASSCRED;
2461                 break;
2462         case TARGET_SO_PASSSEC:
2463                 optname = SO_PASSSEC;
2464                 break;
2465         case TARGET_SO_TIMESTAMP:
2466                 optname = SO_TIMESTAMP;
2467                 break;
2468         case TARGET_SO_RCVLOWAT:
2469                 optname = SO_RCVLOWAT;
2470                 break;
2471         default:
2472             goto unimplemented;
2473         }
2474         if (optlen < sizeof(uint32_t))
2475             return -TARGET_EINVAL;
2476 
2477         if (get_user_u32(val, optval_addr))
2478             return -TARGET_EFAULT;
2479         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2480         break;
2481 #ifdef SOL_NETLINK
2482     case SOL_NETLINK:
2483         switch (optname) {
2484         case NETLINK_PKTINFO:
2485         case NETLINK_ADD_MEMBERSHIP:
2486         case NETLINK_DROP_MEMBERSHIP:
2487         case NETLINK_BROADCAST_ERROR:
2488         case NETLINK_NO_ENOBUFS:
2489 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2490         case NETLINK_LISTEN_ALL_NSID:
2491         case NETLINK_CAP_ACK:
2492 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2493 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2494         case NETLINK_EXT_ACK:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2497         case NETLINK_GET_STRICT_CHK:
2498 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2499             break;
2500         default:
2501             goto unimplemented;
2502         }
2503         val = 0;
2504         if (optlen < sizeof(uint32_t)) {
2505             return -TARGET_EINVAL;
2506         }
2507         if (get_user_u32(val, optval_addr)) {
2508             return -TARGET_EFAULT;
2509         }
2510         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2511                                    sizeof(val)));
2512         break;
2513 #endif /* SOL_NETLINK */
2514     default:
2515     unimplemented:
2516         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2517                       level, optname);
2518         ret = -TARGET_ENOPROTOOPT;
2519     }
2520     return ret;
2521 }
2522 
2523 /* do_getsockopt() must return target values and target errnos. */
2524 static abi_long do_getsockopt(int sockfd, int level, int optname,
2525                               abi_ulong optval_addr, abi_ulong optlen)
2526 {
2527     abi_long ret;
2528     int len, val;
2529     socklen_t lv;
2530 
2531     switch(level) {
2532     case TARGET_SOL_SOCKET:
2533         level = SOL_SOCKET;
2534         switch (optname) {
2535         /* These don't just return a single integer */
2536         case TARGET_SO_PEERNAME:
2537             goto unimplemented;
2538         case TARGET_SO_RCVTIMEO: {
2539             struct timeval tv;
2540             socklen_t tvlen;
2541 
2542             optname = SO_RCVTIMEO;
2543 
2544 get_timeout:
2545             if (get_user_u32(len, optlen)) {
2546                 return -TARGET_EFAULT;
2547             }
2548             if (len < 0) {
2549                 return -TARGET_EINVAL;
2550             }
2551 
2552             tvlen = sizeof(tv);
2553             ret = get_errno(getsockopt(sockfd, level, optname,
2554                                        &tv, &tvlen));
2555             if (ret < 0) {
2556                 return ret;
2557             }
2558             if (len > sizeof(struct target_timeval)) {
2559                 len = sizeof(struct target_timeval);
2560             }
2561             if (copy_to_user_timeval(optval_addr, &tv)) {
2562                 return -TARGET_EFAULT;
2563             }
2564             if (put_user_u32(len, optlen)) {
2565                 return -TARGET_EFAULT;
2566             }
2567             break;
2568         }
2569         case TARGET_SO_SNDTIMEO:
2570             optname = SO_SNDTIMEO;
2571             goto get_timeout;
2572         case TARGET_SO_PEERCRED: {
2573             struct ucred cr;
2574             socklen_t crlen;
2575             struct target_ucred *tcr;
2576 
2577             if (get_user_u32(len, optlen)) {
2578                 return -TARGET_EFAULT;
2579             }
2580             if (len < 0) {
2581                 return -TARGET_EINVAL;
2582             }
2583 
2584             crlen = sizeof(cr);
2585             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2586                                        &cr, &crlen));
2587             if (ret < 0) {
2588                 return ret;
2589             }
2590             if (len > crlen) {
2591                 len = crlen;
2592             }
2593             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2594                 return -TARGET_EFAULT;
2595             }
2596             __put_user(cr.pid, &tcr->pid);
2597             __put_user(cr.uid, &tcr->uid);
2598             __put_user(cr.gid, &tcr->gid);
2599             unlock_user_struct(tcr, optval_addr, 1);
2600             if (put_user_u32(len, optlen)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             break;
2604         }
2605         case TARGET_SO_PEERSEC: {
2606             char *name;
2607 
2608             if (get_user_u32(len, optlen)) {
2609                 return -TARGET_EFAULT;
2610             }
2611             if (len < 0) {
2612                 return -TARGET_EINVAL;
2613             }
2614             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2615             if (!name) {
2616                 return -TARGET_EFAULT;
2617             }
2618             lv = len;
2619             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2620                                        name, &lv));
2621             if (put_user_u32(lv, optlen)) {
2622                 ret = -TARGET_EFAULT;
2623             }
2624             unlock_user(name, optval_addr, lv);
2625             break;
2626         }
2627         case TARGET_SO_LINGER:
2628         {
2629             struct linger lg;
2630             socklen_t lglen;
2631             struct target_linger *tlg;
2632 
2633             if (get_user_u32(len, optlen)) {
2634                 return -TARGET_EFAULT;
2635             }
2636             if (len < 0) {
2637                 return -TARGET_EINVAL;
2638             }
2639 
2640             lglen = sizeof(lg);
2641             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2642                                        &lg, &lglen));
2643             if (ret < 0) {
2644                 return ret;
2645             }
2646             if (len > lglen) {
2647                 len = lglen;
2648             }
2649             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             __put_user(lg.l_onoff, &tlg->l_onoff);
2653             __put_user(lg.l_linger, &tlg->l_linger);
2654             unlock_user_struct(tlg, optval_addr, 1);
2655             if (put_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             break;
2659         }
2660         /* Options with 'int' argument.  */
2661         case TARGET_SO_DEBUG:
2662             optname = SO_DEBUG;
2663             goto int_case;
2664         case TARGET_SO_REUSEADDR:
2665             optname = SO_REUSEADDR;
2666             goto int_case;
2667 #ifdef SO_REUSEPORT
2668         case TARGET_SO_REUSEPORT:
2669             optname = SO_REUSEPORT;
2670             goto int_case;
2671 #endif
2672         case TARGET_SO_TYPE:
2673             optname = SO_TYPE;
2674             goto int_case;
2675         case TARGET_SO_ERROR:
2676             optname = SO_ERROR;
2677             goto int_case;
2678         case TARGET_SO_DONTROUTE:
2679             optname = SO_DONTROUTE;
2680             goto int_case;
2681         case TARGET_SO_BROADCAST:
2682             optname = SO_BROADCAST;
2683             goto int_case;
2684         case TARGET_SO_SNDBUF:
2685             optname = SO_SNDBUF;
2686             goto int_case;
2687         case TARGET_SO_RCVBUF:
2688             optname = SO_RCVBUF;
2689             goto int_case;
2690         case TARGET_SO_KEEPALIVE:
2691             optname = SO_KEEPALIVE;
2692             goto int_case;
2693         case TARGET_SO_OOBINLINE:
2694             optname = SO_OOBINLINE;
2695             goto int_case;
2696         case TARGET_SO_NO_CHECK:
2697             optname = SO_NO_CHECK;
2698             goto int_case;
2699         case TARGET_SO_PRIORITY:
2700             optname = SO_PRIORITY;
2701             goto int_case;
2702 #ifdef SO_BSDCOMPAT
2703         case TARGET_SO_BSDCOMPAT:
2704             optname = SO_BSDCOMPAT;
2705             goto int_case;
2706 #endif
2707         case TARGET_SO_PASSCRED:
2708             optname = SO_PASSCRED;
2709             goto int_case;
2710         case TARGET_SO_TIMESTAMP:
2711             optname = SO_TIMESTAMP;
2712             goto int_case;
2713         case TARGET_SO_RCVLOWAT:
2714             optname = SO_RCVLOWAT;
2715             goto int_case;
2716         case TARGET_SO_ACCEPTCONN:
2717             optname = SO_ACCEPTCONN;
2718             goto int_case;
2719         case TARGET_SO_PROTOCOL:
2720             optname = SO_PROTOCOL;
2721             goto int_case;
2722         case TARGET_SO_DOMAIN:
2723             optname = SO_DOMAIN;
2724             goto int_case;
2725         default:
2726             goto int_case;
2727         }
2728         break;
2729     case SOL_TCP:
2730     case SOL_UDP:
2731         /* TCP and UDP options all take an 'int' value.  */
2732     int_case:
2733         if (get_user_u32(len, optlen))
2734             return -TARGET_EFAULT;
2735         if (len < 0)
2736             return -TARGET_EINVAL;
2737         lv = sizeof(lv);
2738         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2739         if (ret < 0)
2740             return ret;
2741         if (optname == SO_TYPE) {
2742             val = host_to_target_sock_type(val);
2743         }
2744         if (len > lv)
2745             len = lv;
2746         if (len == 4) {
2747             if (put_user_u32(val, optval_addr))
2748                 return -TARGET_EFAULT;
2749         } else {
2750             if (put_user_u8(val, optval_addr))
2751                 return -TARGET_EFAULT;
2752         }
2753         if (put_user_u32(len, optlen))
2754             return -TARGET_EFAULT;
2755         break;
2756     case SOL_IP:
2757         switch(optname) {
2758         case IP_TOS:
2759         case IP_TTL:
2760         case IP_HDRINCL:
2761         case IP_ROUTER_ALERT:
2762         case IP_RECVOPTS:
2763         case IP_RETOPTS:
2764         case IP_PKTINFO:
2765         case IP_MTU_DISCOVER:
2766         case IP_RECVERR:
2767         case IP_RECVTOS:
2768 #ifdef IP_FREEBIND
2769         case IP_FREEBIND:
2770 #endif
2771         case IP_MULTICAST_TTL:
2772         case IP_MULTICAST_LOOP:
2773             if (get_user_u32(len, optlen))
2774                 return -TARGET_EFAULT;
2775             if (len < 0)
2776                 return -TARGET_EINVAL;
2777             lv = sizeof(lv);
2778             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2779             if (ret < 0)
2780                 return ret;
2781             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2782                 len = 1;
2783                 if (put_user_u32(len, optlen)
2784                     || put_user_u8(val, optval_addr))
2785                     return -TARGET_EFAULT;
2786             } else {
2787                 if (len > sizeof(int))
2788                     len = sizeof(int);
2789                 if (put_user_u32(len, optlen)
2790                     || put_user_u32(val, optval_addr))
2791                     return -TARGET_EFAULT;
2792             }
2793             break;
2794         default:
2795             ret = -TARGET_ENOPROTOOPT;
2796             break;
2797         }
2798         break;
2799     case SOL_IPV6:
2800         switch (optname) {
2801         case IPV6_MTU_DISCOVER:
2802         case IPV6_MTU:
2803         case IPV6_V6ONLY:
2804         case IPV6_RECVPKTINFO:
2805         case IPV6_UNICAST_HOPS:
2806         case IPV6_MULTICAST_HOPS:
2807         case IPV6_MULTICAST_LOOP:
2808         case IPV6_RECVERR:
2809         case IPV6_RECVHOPLIMIT:
2810         case IPV6_2292HOPLIMIT:
2811         case IPV6_CHECKSUM:
2812         case IPV6_ADDRFORM:
2813         case IPV6_2292PKTINFO:
2814         case IPV6_RECVTCLASS:
2815         case IPV6_RECVRTHDR:
2816         case IPV6_2292RTHDR:
2817         case IPV6_RECVHOPOPTS:
2818         case IPV6_2292HOPOPTS:
2819         case IPV6_RECVDSTOPTS:
2820         case IPV6_2292DSTOPTS:
2821         case IPV6_TCLASS:
2822         case IPV6_ADDR_PREFERENCES:
2823 #ifdef IPV6_RECVPATHMTU
2824         case IPV6_RECVPATHMTU:
2825 #endif
2826 #ifdef IPV6_TRANSPARENT
2827         case IPV6_TRANSPARENT:
2828 #endif
2829 #ifdef IPV6_FREEBIND
2830         case IPV6_FREEBIND:
2831 #endif
2832 #ifdef IPV6_RECVORIGDSTADDR
2833         case IPV6_RECVORIGDSTADDR:
2834 #endif
2835             if (get_user_u32(len, optlen))
2836                 return -TARGET_EFAULT;
2837             if (len < 0)
2838                 return -TARGET_EINVAL;
2839             lv = sizeof(lv);
2840             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2841             if (ret < 0)
2842                 return ret;
2843             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2844                 len = 1;
2845                 if (put_user_u32(len, optlen)
2846                     || put_user_u8(val, optval_addr))
2847                     return -TARGET_EFAULT;
2848             } else {
2849                 if (len > sizeof(int))
2850                     len = sizeof(int);
2851                 if (put_user_u32(len, optlen)
2852                     || put_user_u32(val, optval_addr))
2853                     return -TARGET_EFAULT;
2854             }
2855             break;
2856         default:
2857             ret = -TARGET_ENOPROTOOPT;
2858             break;
2859         }
2860         break;
2861 #ifdef SOL_NETLINK
2862     case SOL_NETLINK:
2863         switch (optname) {
2864         case NETLINK_PKTINFO:
2865         case NETLINK_BROADCAST_ERROR:
2866         case NETLINK_NO_ENOBUFS:
2867 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2868         case NETLINK_LISTEN_ALL_NSID:
2869         case NETLINK_CAP_ACK:
2870 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2871 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2872         case NETLINK_EXT_ACK:
2873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2874 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2875         case NETLINK_GET_STRICT_CHK:
2876 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2877             if (get_user_u32(len, optlen)) {
2878                 return -TARGET_EFAULT;
2879             }
2880             if (len != sizeof(val)) {
2881                 return -TARGET_EINVAL;
2882             }
2883             lv = len;
2884             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2885             if (ret < 0) {
2886                 return ret;
2887             }
2888             if (put_user_u32(lv, optlen)
2889                 || put_user_u32(val, optval_addr)) {
2890                 return -TARGET_EFAULT;
2891             }
2892             break;
2893 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2894         case NETLINK_LIST_MEMBERSHIPS:
2895         {
2896             uint32_t *results;
2897             int i;
2898             if (get_user_u32(len, optlen)) {
2899                 return -TARGET_EFAULT;
2900             }
2901             if (len < 0) {
2902                 return -TARGET_EINVAL;
2903             }
2904             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2905             if (!results && len > 0) {
2906                 return -TARGET_EFAULT;
2907             }
2908             lv = len;
2909             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2910             if (ret < 0) {
2911                 unlock_user(results, optval_addr, 0);
2912                 return ret;
2913             }
2914             /* Swap host endianness to target endianness. */
2915             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2916                 results[i] = tswap32(results[i]);
2917             }
2918             if (put_user_u32(lv, optlen)) {
2919                 return -TARGET_EFAULT;
2920             }
2921             unlock_user(results, optval_addr, 0);
2922             break;
2923         }
2924 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2925         default:
2926             goto unimplemented;
2927         }
2928         break;
2929 #endif /* SOL_NETLINK */
2930     default:
2931     unimplemented:
2932         qemu_log_mask(LOG_UNIMP,
2933                       "getsockopt level=%d optname=%d not yet supported\n",
2934                       level, optname);
2935         ret = -TARGET_EOPNOTSUPP;
2936         break;
2937     }
2938     return ret;
2939 }
2940 
2941 /* Convert a target low/high pair representing a file offset into the host
2942  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2943  * as the kernel doesn't handle them either.
2944  */
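     /* Worked example (illustrative values): on a 64-bit host with a 32-bit
      * target, tlow = 0x89abcdef and thigh = 0x01234567 combine into
      * off = 0x0123456789abcdef, so *hlow receives the full 64-bit offset
      * and *hhigh becomes 0.
      */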
2945 static void target_to_host_low_high(abi_ulong tlow,
2946                                     abi_ulong thigh,
2947                                     unsigned long *hlow,
2948                                     unsigned long *hhigh)
2949 {
2950     uint64_t off = tlow |
2951         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2952         TARGET_LONG_BITS / 2;
2953 
2954     *hlow = off;
2955     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2956 }
2957 
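     /*
      * Lock a guest iovec array for I/O.  Counts above IOV_MAX are rejected,
      * each guest buffer is locked, and the total length is clamped; a bad
      * address after the first entry degrades to a zero-length entry so the
      * syscall performs a partial transfer instead of failing outright.
      */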
2958 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2959                                 abi_ulong count, int copy)
2960 {
2961     struct target_iovec *target_vec;
2962     struct iovec *vec;
2963     abi_ulong total_len, max_len;
2964     int i;
2965     int err = 0;
2966     bool bad_address = false;
2967 
2968     if (count == 0) {
2969         errno = 0;
2970         return NULL;
2971     }
2972     if (count > IOV_MAX) {
2973         errno = EINVAL;
2974         return NULL;
2975     }
2976 
2977     vec = g_try_new0(struct iovec, count);
2978     if (vec == NULL) {
2979         errno = ENOMEM;
2980         return NULL;
2981     }
2982 
2983     target_vec = lock_user(VERIFY_READ, target_addr,
2984                            count * sizeof(struct target_iovec), 1);
2985     if (target_vec == NULL) {
2986         err = EFAULT;
2987         goto fail2;
2988     }
2989 
2990     /* ??? If host page size > target page size, this will result in a
2991        value larger than what we can actually support.  */
2992     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2993     total_len = 0;
2994 
2995     for (i = 0; i < count; i++) {
2996         abi_ulong base = tswapal(target_vec[i].iov_base);
2997         abi_long len = tswapal(target_vec[i].iov_len);
2998 
2999         if (len < 0) {
3000             err = EINVAL;
3001             goto fail;
3002         } else if (len == 0) {
3003             /* Zero length pointer is ignored.  */
3004             vec[i].iov_base = 0;
3005         } else {
3006             vec[i].iov_base = lock_user(type, base, len, copy);
3007             /* If the first buffer pointer is bad, this is a fault.  But
3008              * subsequent bad buffers will result in a partial write; this
3009              * is realized by filling the vector with null pointers and
3010              * zero lengths. */
3011             if (!vec[i].iov_base) {
3012                 if (i == 0) {
3013                     err = EFAULT;
3014                     goto fail;
3015                 } else {
3016                     bad_address = true;
3017                 }
3018             }
3019             if (bad_address) {
3020                 len = 0;
3021             }
3022             if (len > max_len - total_len) {
3023                 len = max_len - total_len;
3024             }
3025         }
3026         vec[i].iov_len = len;
3027         total_len += len;
3028     }
3029 
3030     unlock_user(target_vec, target_addr, 0);
3031     return vec;
3032 
3033  fail:
3034     while (--i >= 0) {
3035         if (tswapal(target_vec[i].iov_len) > 0) {
3036             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3037         }
3038     }
3039     unlock_user(target_vec, target_addr, 0);
3040  fail2:
3041     g_free(vec);
3042     errno = err;
3043     return NULL;
3044 }
3045 
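     /*
      * Release the buffers locked by lock_iovec(), copying data back to the
      * guest when 'copy' is set, and free the host iovec array.
      */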
3046 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3047                          abi_ulong count, int copy)
3048 {
3049     struct target_iovec *target_vec;
3050     int i;
3051 
3052     target_vec = lock_user(VERIFY_READ, target_addr,
3053                            count * sizeof(struct target_iovec), 1);
3054     if (target_vec) {
3055         for (i = 0; i < count; i++) {
3056             abi_ulong base = tswapal(target_vec[i].iov_base);
3057             abi_long len = tswapal(target_vec[i].iov_len);
3058             if (len < 0) {
3059                 break;
3060             }
3061             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3062         }
3063         unlock_user(target_vec, target_addr, 0);
3064     }
3065 
3066     g_free(vec);
3067 }
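
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * source): how a readv/writev-style handler typically uses the pair
 * above. "target_iov_addr" and "iovcnt" are hypothetical guest values.
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_iov_addr,
 *                                    iovcnt, 0);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);  // errno set by lock_iovec
 *     }
 *     ret = get_errno(safe_readv(fd, vec, iovcnt));
 *     unlock_iovec(vec, target_iov_addr, iovcnt, 1);  // copy back, unlock
 *
 * lock_iovec() converts the guest struct target_iovec array into host
 * struct iovec entries and locks each buffer; unlock_iovec() releases
 * them and, when "copy" is non-zero, writes the data back to the guest.
 */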
3068 
3069 static inline int target_to_host_sock_type(int *type)
3070 {
3071     int host_type = 0;
3072     int target_type = *type;
3073 
3074     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3075     case TARGET_SOCK_DGRAM:
3076         host_type = SOCK_DGRAM;
3077         break;
3078     case TARGET_SOCK_STREAM:
3079         host_type = SOCK_STREAM;
3080         break;
3081     default:
3082         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3083         break;
3084     }
3085     if (target_type & TARGET_SOCK_CLOEXEC) {
3086 #if defined(SOCK_CLOEXEC)
3087         host_type |= SOCK_CLOEXEC;
3088 #else
3089         return -TARGET_EINVAL;
3090 #endif
3091     }
3092     if (target_type & TARGET_SOCK_NONBLOCK) {
3093 #if defined(SOCK_NONBLOCK)
3094         host_type |= SOCK_NONBLOCK;
3095 #elif !defined(O_NONBLOCK)
3096         return -TARGET_EINVAL;
3097 #endif
3098     }
3099     *type = host_type;
3100     return 0;
3101 }
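
/*
 * Illustrative sketch (editor's note): translating a guest socket type
 * argument before calling the host socket(). The flag combination below
 * is a hypothetical example value.
 *
 *     int type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC;
 *     int ret = target_to_host_sock_type(&type);
 *     if (ret) {
 *         return ret;               // e.g. -TARGET_EINVAL on old hosts
 *     }
 *     // type now holds SOCK_STREAM | SOCK_CLOEXEC in the host encoding
 */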
3102 
3103 /* Try to emulate socket type flags after socket creation.  */
3104 static int sock_flags_fixup(int fd, int target_type)
3105 {
3106 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3107     if (target_type & TARGET_SOCK_NONBLOCK) {
3108         int flags = fcntl(fd, F_GETFL);
3109         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3110             close(fd);
3111             return -TARGET_EINVAL;
3112         }
3113     }
3114 #endif
3115     return fd;
3116 }
3117 
3118 /* do_socket() Must return target values and target errnos. */
3119 static abi_long do_socket(int domain, int type, int protocol)
3120 {
3121     int target_type = type;
3122     int ret;
3123 
3124     ret = target_to_host_sock_type(&type);
3125     if (ret) {
3126         return ret;
3127     }
3128 
3129     if (domain == PF_NETLINK && !(
3130 #ifdef CONFIG_RTNETLINK
3131          protocol == NETLINK_ROUTE ||
3132 #endif
3133          protocol == NETLINK_KOBJECT_UEVENT ||
3134          protocol == NETLINK_AUDIT)) {
3135         return -TARGET_EPROTONOSUPPORT;
3136     }
3137 
3138     if (domain == AF_PACKET ||
3139         (domain == AF_INET && type == SOCK_PACKET)) {
3140         protocol = tswap16(protocol);
3141     }
3142 
3143     ret = get_errno(socket(domain, type, protocol));
3144     if (ret >= 0) {
3145         ret = sock_flags_fixup(ret, target_type);
3146         if (type == SOCK_PACKET) {
3147             /* Handle an obsolete case: sockets of type SOCK_PACKET
3148              * are bound by name.
3149              */
3150             fd_trans_register(ret, &target_packet_trans);
3151         } else if (domain == PF_NETLINK) {
3152             switch (protocol) {
3153 #ifdef CONFIG_RTNETLINK
3154             case NETLINK_ROUTE:
3155                 fd_trans_register(ret, &target_netlink_route_trans);
3156                 break;
3157 #endif
3158             case NETLINK_KOBJECT_UEVENT:
3159                 /* nothing to do: messages are strings */
3160                 break;
3161             case NETLINK_AUDIT:
3162                 fd_trans_register(ret, &target_netlink_audit_trans);
3163                 break;
3164             default:
3165                 g_assert_not_reached();
3166             }
3167         }
3168     }
3169     return ret;
3170 }
3171 
3172 /* do_bind() Must return target values and target errnos. */
3173 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3174                         socklen_t addrlen)
3175 {
3176     void *addr;
3177     abi_long ret;
3178 
3179     if ((int)addrlen < 0) {
3180         return -TARGET_EINVAL;
3181     }
3182 
3183     addr = alloca(addrlen+1);
3184 
3185     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3186     if (ret)
3187         return ret;
3188 
3189     return get_errno(bind(sockfd, addr, addrlen));
3190 }
3191 
3192 /* do_connect() Must return target values and target errnos. */
3193 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3194                            socklen_t addrlen)
3195 {
3196     void *addr;
3197     abi_long ret;
3198 
3199     if ((int)addrlen < 0) {
3200         return -TARGET_EINVAL;
3201     }
3202 
3203     addr = alloca(addrlen+1);
3204 
3205     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3206     if (ret)
3207         return ret;
3208 
3209     return get_errno(safe_connect(sockfd, addr, addrlen));
3210 }
3211 
3212 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3213 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3214                                       int flags, int send)
3215 {
3216     abi_long ret, len;
3217     struct msghdr msg;
3218     abi_ulong count;
3219     struct iovec *vec;
3220     abi_ulong target_vec;
3221 
3222     if (msgp->msg_name) {
3223         msg.msg_namelen = tswap32(msgp->msg_namelen);
3224         msg.msg_name = alloca(msg.msg_namelen+1);
3225         ret = target_to_host_sockaddr(fd, msg.msg_name,
3226                                       tswapal(msgp->msg_name),
3227                                       msg.msg_namelen);
3228         if (ret == -TARGET_EFAULT) {
3229             /* For connected sockets msg_name and msg_namelen must
3230              * be ignored, so returning EFAULT immediately is wrong.
3231              * Instead, pass a bad msg_name to the host kernel, and
3232              * let it decide whether to return EFAULT or not.
3233              */
3234             msg.msg_name = (void *)-1;
3235         } else if (ret) {
3236             goto out2;
3237         }
3238     } else {
3239         msg.msg_name = NULL;
3240         msg.msg_namelen = 0;
3241     }
3242     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3243     msg.msg_control = alloca(msg.msg_controllen);
3244     memset(msg.msg_control, 0, msg.msg_controllen);
3245 
3246     msg.msg_flags = tswap32(msgp->msg_flags);
3247 
3248     count = tswapal(msgp->msg_iovlen);
3249     target_vec = tswapal(msgp->msg_iov);
3250 
3251     if (count > IOV_MAX) {
3252         /* sendmsg/recvmsg return a different errno for this condition than
3253          * readv/writev, so we must catch it here before lock_iovec() does.
3254          */
3255         ret = -TARGET_EMSGSIZE;
3256         goto out2;
3257     }
3258 
3259     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3260                      target_vec, count, send);
3261     if (vec == NULL) {
3262         ret = -host_to_target_errno(errno);
3263         goto out2;
3264     }
3265     msg.msg_iovlen = count;
3266     msg.msg_iov = vec;
3267 
3268     if (send) {
3269         if (fd_trans_target_to_host_data(fd)) {
3270             void *host_msg;
3271 
3272             host_msg = g_malloc(msg.msg_iov->iov_len);
3273             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3274             ret = fd_trans_target_to_host_data(fd)(host_msg,
3275                                                    msg.msg_iov->iov_len);
3276             if (ret >= 0) {
3277                 msg.msg_iov->iov_base = host_msg;
3278                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3279             }
3280             g_free(host_msg);
3281         } else {
3282             ret = target_to_host_cmsg(&msg, msgp);
3283             if (ret == 0) {
3284                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3285             }
3286         }
3287     } else {
3288         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3289         if (!is_error(ret)) {
3290             len = ret;
3291             if (fd_trans_host_to_target_data(fd)) {
3292                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3293                                                MIN(msg.msg_iov->iov_len, len));
3294             } else {
3295                 ret = host_to_target_cmsg(msgp, &msg);
3296             }
3297             if (!is_error(ret)) {
3298                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3299                 msgp->msg_flags = tswap32(msg.msg_flags);
3300                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3301                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3302                                     msg.msg_name, msg.msg_namelen);
3303                     if (ret) {
3304                         goto out;
3305                     }
3306                 }
3307 
3308                 ret = len;
3309             }
3310         }
3311     }
3312 
3313 out:
3314     unlock_iovec(vec, target_vec, count, !send);
3315 out2:
3316     return ret;
3317 }
3318 
3319 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3320                                int flags, int send)
3321 {
3322     abi_long ret;
3323     struct target_msghdr *msgp;
3324 
3325     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3326                           msgp,
3327                           target_msg,
3328                           send ? 1 : 0)) {
3329         return -TARGET_EFAULT;
3330     }
3331     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3332     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3333     return ret;
3334 }
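
/*
 * Illustrative sketch (editor's note): the guest-side call that reaches
 * do_sendrecvmsg(). A hypothetical guest program executing
 *
 *     struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *     struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *     sendmsg(sockfd, &mh, 0);
 *
 * arrives here as do_sendrecvmsg(sockfd, guest_msghdr_addr, 0, 1); the
 * guest msghdr is locked and forwarded to do_sendrecvmsg_locked(), which
 * rebuilds a host struct msghdr (name, control data and iovec) around it.
 */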
3335 
3336 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3337  * so it might not have this *mmsg-specific flag either.
3338  */
3339 #ifndef MSG_WAITFORONE
3340 #define MSG_WAITFORONE 0x10000
3341 #endif
3342 
3343 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3344                                 unsigned int vlen, unsigned int flags,
3345                                 int send)
3346 {
3347     struct target_mmsghdr *mmsgp;
3348     abi_long ret = 0;
3349     int i;
3350 
3351     if (vlen > UIO_MAXIOV) {
3352         vlen = UIO_MAXIOV;
3353     }
3354 
3355     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3356     if (!mmsgp) {
3357         return -TARGET_EFAULT;
3358     }
3359 
3360     for (i = 0; i < vlen; i++) {
3361         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3362         if (is_error(ret)) {
3363             break;
3364         }
3365         mmsgp[i].msg_len = tswap32(ret);
3366         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3367         if (flags & MSG_WAITFORONE) {
3368             flags |= MSG_DONTWAIT;
3369         }
3370     }
3371 
3372     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3373 
3374     /* Return number of datagrams sent if we sent any at all;
3375      * otherwise return the error.
3376      */
3377     if (i) {
3378         return i;
3379     }
3380     return ret;
3381 }
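
/*
 * Worked example (editor's note): effect of MSG_WAITFORONE in the loop
 * above for a hypothetical recvmmsg(fd, vec, 3, MSG_WAITFORONE, NULL).
 * The first do_sendrecvmsg_locked() call may block until a datagram
 * arrives; after it succeeds, MSG_DONTWAIT is OR'ed into "flags", so the
 * remaining iterations return immediately if nothing else is queued and
 * the function reports the number of datagrams actually received.
 */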
3382 
3383 /* do_accept4() Must return target values and target errnos. */
3384 static abi_long do_accept4(int fd, abi_ulong target_addr,
3385                            abi_ulong target_addrlen_addr, int flags)
3386 {
3387     socklen_t addrlen, ret_addrlen;
3388     void *addr;
3389     abi_long ret;
3390     int host_flags;
3391 
3392     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3393 
3394     if (target_addr == 0) {
3395         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3396     }
3397 
3398     /* linux returns EFAULT if addrlen pointer is invalid */
3399     if (get_user_u32(addrlen, target_addrlen_addr))
3400         return -TARGET_EFAULT;
3401 
3402     if ((int)addrlen < 0) {
3403         return -TARGET_EINVAL;
3404     }
3405 
3406     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3407         return -TARGET_EFAULT;
3408     }
3409 
3410     addr = alloca(addrlen);
3411 
3412     ret_addrlen = addrlen;
3413     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3414     if (!is_error(ret)) {
3415         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3416         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3417             ret = -TARGET_EFAULT;
3418         }
3419     }
3420     return ret;
3421 }
3422 
3423 /* do_getpeername() Must return target values and target errnos. */
3424 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3425                                abi_ulong target_addrlen_addr)
3426 {
3427     socklen_t addrlen, ret_addrlen;
3428     void *addr;
3429     abi_long ret;
3430 
3431     if (get_user_u32(addrlen, target_addrlen_addr))
3432         return -TARGET_EFAULT;
3433 
3434     if ((int)addrlen < 0) {
3435         return -TARGET_EINVAL;
3436     }
3437 
3438     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3439         return -TARGET_EFAULT;
3440     }
3441 
3442     addr = alloca(addrlen);
3443 
3444     ret_addrlen = addrlen;
3445     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3446     if (!is_error(ret)) {
3447         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3448         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3449             ret = -TARGET_EFAULT;
3450         }
3451     }
3452     return ret;
3453 }
3454 
3455 /* do_getsockname() Must return target values and target errnos. */
3456 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3457                                abi_ulong target_addrlen_addr)
3458 {
3459     socklen_t addrlen, ret_addrlen;
3460     void *addr;
3461     abi_long ret;
3462 
3463     if (get_user_u32(addrlen, target_addrlen_addr))
3464         return -TARGET_EFAULT;
3465 
3466     if ((int)addrlen < 0) {
3467         return -TARGET_EINVAL;
3468     }
3469 
3470     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3471         return -TARGET_EFAULT;
3472     }
3473 
3474     addr = alloca(addrlen);
3475 
3476     ret_addrlen = addrlen;
3477     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3478     if (!is_error(ret)) {
3479         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3480         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3481             ret = -TARGET_EFAULT;
3482         }
3483     }
3484     return ret;
3485 }
3486 
3487 /* do_socketpair() Must return target values and target errnos. */
3488 static abi_long do_socketpair(int domain, int type, int protocol,
3489                               abi_ulong target_tab_addr)
3490 {
3491     int tab[2];
3492     abi_long ret;
3493 
3494     target_to_host_sock_type(&type);
3495 
3496     ret = get_errno(socketpair(domain, type, protocol, tab));
3497     if (!is_error(ret)) {
3498         if (put_user_s32(tab[0], target_tab_addr)
3499             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3500             ret = -TARGET_EFAULT;
3501     }
3502     return ret;
3503 }
3504 
3505 /* do_sendto() Must return target values and target errnos. */
3506 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3507                           abi_ulong target_addr, socklen_t addrlen)
3508 {
3509     void *addr;
3510     void *host_msg;
3511     void *copy_msg = NULL;
3512     abi_long ret;
3513 
3514     if ((int)addrlen < 0) {
3515         return -TARGET_EINVAL;
3516     }
3517 
3518     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3519     if (!host_msg)
3520         return -TARGET_EFAULT;
3521     if (fd_trans_target_to_host_data(fd)) {
3522         copy_msg = host_msg;
3523         host_msg = g_malloc(len);
3524         memcpy(host_msg, copy_msg, len);
3525         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3526         if (ret < 0) {
3527             goto fail;
3528         }
3529     }
3530     if (target_addr) {
3531         addr = alloca(addrlen+1);
3532         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3533         if (ret) {
3534             goto fail;
3535         }
3536         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3537     } else {
3538         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3539     }
3540 fail:
3541     if (copy_msg) {
3542         g_free(host_msg);
3543         host_msg = copy_msg;
3544     }
3545     unlock_user(host_msg, msg, 0);
3546     return ret;
3547 }
3548 
3549 /* do_recvfrom() Must return target values and target errnos. */
3550 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3551                             abi_ulong target_addr,
3552                             abi_ulong target_addrlen)
3553 {
3554     socklen_t addrlen, ret_addrlen;
3555     void *addr;
3556     void *host_msg;
3557     abi_long ret;
3558 
3559     if (!msg) {
3560         host_msg = NULL;
3561     } else {
3562         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3563         if (!host_msg) {
3564             return -TARGET_EFAULT;
3565         }
3566     }
3567     if (target_addr) {
3568         if (get_user_u32(addrlen, target_addrlen)) {
3569             ret = -TARGET_EFAULT;
3570             goto fail;
3571         }
3572         if ((int)addrlen < 0) {
3573             ret = -TARGET_EINVAL;
3574             goto fail;
3575         }
3576         addr = alloca(addrlen);
3577         ret_addrlen = addrlen;
3578         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3579                                       addr, &ret_addrlen));
3580     } else {
3581         addr = NULL; /* To keep compiler quiet.  */
3582         addrlen = 0; /* To keep compiler quiet.  */
3583         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3584     }
3585     if (!is_error(ret)) {
3586         if (fd_trans_host_to_target_data(fd)) {
3587             abi_long trans;
3588             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3589             if (is_error(trans)) {
3590                 ret = trans;
3591                 goto fail;
3592             }
3593         }
3594         if (target_addr) {
3595             host_to_target_sockaddr(target_addr, addr,
3596                                     MIN(addrlen, ret_addrlen));
3597             if (put_user_u32(ret_addrlen, target_addrlen)) {
3598                 ret = -TARGET_EFAULT;
3599                 goto fail;
3600             }
3601         }
3602         unlock_user(host_msg, msg, len);
3603     } else {
3604 fail:
3605         unlock_user(host_msg, msg, 0);
3606     }
3607     return ret;
3608 }
3609 
3610 #ifdef TARGET_NR_socketcall
3611 /* do_socketcall() must return target values and target errnos. */
3612 static abi_long do_socketcall(int num, abi_ulong vptr)
3613 {
3614     static const unsigned nargs[] = { /* number of arguments per operation */
3615         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3616         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3617         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3618         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3619         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3620         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3621         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3622         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3623         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3624         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3625         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3626         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3627         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3628         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3629         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3630         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3631         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3632         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3633         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3634         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3635     };
3636     abi_long a[6]; /* max 6 args */
3637     unsigned i;
3638 
3639     /* check the range of the first argument num */
3640     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3641     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3642         return -TARGET_EINVAL;
3643     }
3644     /* ensure we have space for args */
3645     if (nargs[num] > ARRAY_SIZE(a)) {
3646         return -TARGET_EINVAL;
3647     }
3648     /* collect the arguments in a[] according to nargs[] */
3649     for (i = 0; i < nargs[num]; ++i) {
3650         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3651             return -TARGET_EFAULT;
3652         }
3653     }
3654     /* now when we have the args, invoke the appropriate underlying function */
3655     switch (num) {
3656     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3657         return do_socket(a[0], a[1], a[2]);
3658     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3659         return do_bind(a[0], a[1], a[2]);
3660     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3661         return do_connect(a[0], a[1], a[2]);
3662     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3663         return get_errno(listen(a[0], a[1]));
3664     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3665         return do_accept4(a[0], a[1], a[2], 0);
3666     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3667         return do_getsockname(a[0], a[1], a[2]);
3668     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3669         return do_getpeername(a[0], a[1], a[2]);
3670     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3671         return do_socketpair(a[0], a[1], a[2], a[3]);
3672     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3673         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3674     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3675         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3676     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3677         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3678     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3679         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3680     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3681         return get_errno(shutdown(a[0], a[1]));
3682     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3683         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3684     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3685         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3686     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3687         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3688     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3689         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3690     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3691         return do_accept4(a[0], a[1], a[2], a[3]);
3692     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3693         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3694     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3695         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3696     default:
3697         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3698         return -TARGET_EINVAL;
3699     }
3700 }
3701 #endif
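
/*
 * Illustrative sketch (editor's note): how the socketcall multiplexer
 * above unpacks its arguments. For a hypothetical 32-bit guest calling
 *
 *     socketcall(SYS_CONNECT, args);    // args = { fd, addr, addrlen }
 *
 * do_socketcall() is entered with num == TARGET_SYS_CONNECT and vptr
 * pointing at args[] in guest memory; it reads nargs[num] == 3 abi_longs
 * into a[] and dispatches do_connect(a[0], a[1], a[2]).
 */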
3702 
3703 #define N_SHM_REGIONS	32
3704 
3705 static struct shm_region {
3706     abi_ulong start;
3707     abi_ulong size;
3708     bool in_use;
3709 } shm_regions[N_SHM_REGIONS];
3710 
3711 #ifndef TARGET_SEMID64_DS
3712 /* asm-generic version of this struct */
3713 struct target_semid64_ds
3714 {
3715   struct target_ipc_perm sem_perm;
3716   abi_ulong sem_otime;
3717 #if TARGET_ABI_BITS == 32
3718   abi_ulong __unused1;
3719 #endif
3720   abi_ulong sem_ctime;
3721 #if TARGET_ABI_BITS == 32
3722   abi_ulong __unused2;
3723 #endif
3724   abi_ulong sem_nsems;
3725   abi_ulong __unused3;
3726   abi_ulong __unused4;
3727 };
3728 #endif
3729 
3730 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3731                                                abi_ulong target_addr)
3732 {
3733     struct target_ipc_perm *target_ip;
3734     struct target_semid64_ds *target_sd;
3735 
3736     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3737         return -TARGET_EFAULT;
3738     target_ip = &(target_sd->sem_perm);
3739     host_ip->__key = tswap32(target_ip->__key);
3740     host_ip->uid = tswap32(target_ip->uid);
3741     host_ip->gid = tswap32(target_ip->gid);
3742     host_ip->cuid = tswap32(target_ip->cuid);
3743     host_ip->cgid = tswap32(target_ip->cgid);
3744 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3745     host_ip->mode = tswap32(target_ip->mode);
3746 #else
3747     host_ip->mode = tswap16(target_ip->mode);
3748 #endif
3749 #if defined(TARGET_PPC)
3750     host_ip->__seq = tswap32(target_ip->__seq);
3751 #else
3752     host_ip->__seq = tswap16(target_ip->__seq);
3753 #endif
3754     unlock_user_struct(target_sd, target_addr, 0);
3755     return 0;
3756 }
3757 
3758 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3759                                                struct ipc_perm *host_ip)
3760 {
3761     struct target_ipc_perm *target_ip;
3762     struct target_semid64_ds *target_sd;
3763 
3764     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3765         return -TARGET_EFAULT;
3766     target_ip = &(target_sd->sem_perm);
3767     target_ip->__key = tswap32(host_ip->__key);
3768     target_ip->uid = tswap32(host_ip->uid);
3769     target_ip->gid = tswap32(host_ip->gid);
3770     target_ip->cuid = tswap32(host_ip->cuid);
3771     target_ip->cgid = tswap32(host_ip->cgid);
3772 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3773     target_ip->mode = tswap32(host_ip->mode);
3774 #else
3775     target_ip->mode = tswap16(host_ip->mode);
3776 #endif
3777 #if defined(TARGET_PPC)
3778     target_ip->__seq = tswap32(host_ip->__seq);
3779 #else
3780     target_ip->__seq = tswap16(host_ip->__seq);
3781 #endif
3782     unlock_user_struct(target_sd, target_addr, 1);
3783     return 0;
3784 }
3785 
3786 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3787                                                abi_ulong target_addr)
3788 {
3789     struct target_semid64_ds *target_sd;
3790 
3791     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3792         return -TARGET_EFAULT;
3793     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3794         return -TARGET_EFAULT;
3795     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3796     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3797     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3798     unlock_user_struct(target_sd, target_addr, 0);
3799     return 0;
3800 }
3801 
3802 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3803                                                struct semid_ds *host_sd)
3804 {
3805     struct target_semid64_ds *target_sd;
3806 
3807     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3808         return -TARGET_EFAULT;
3809     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3810         return -TARGET_EFAULT;
3811     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3812     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3813     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3814     unlock_user_struct(target_sd, target_addr, 1);
3815     return 0;
3816 }
3817 
3818 struct target_seminfo {
3819     int semmap;
3820     int semmni;
3821     int semmns;
3822     int semmnu;
3823     int semmsl;
3824     int semopm;
3825     int semume;
3826     int semusz;
3827     int semvmx;
3828     int semaem;
3829 };
3830 
3831 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3832                                               struct seminfo *host_seminfo)
3833 {
3834     struct target_seminfo *target_seminfo;
3835     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3836         return -TARGET_EFAULT;
3837     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3838     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3839     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3840     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3841     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3842     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3843     __put_user(host_seminfo->semume, &target_seminfo->semume);
3844     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3845     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3846     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3847     unlock_user_struct(target_seminfo, target_addr, 1);
3848     return 0;
3849 }
3850 
3851 union semun {
3852     int val;
3853     struct semid_ds *buf;
3854     unsigned short *array;
3855     struct seminfo *__buf;
3856 };
3857 
3858 union target_semun {
3859     int val;
3860     abi_ulong buf;
3861     abi_ulong array;
3862     abi_ulong __buf;
3863 };
3864 
3865 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3866                                                abi_ulong target_addr)
3867 {
3868     int nsems;
3869     unsigned short *array;
3870     union semun semun;
3871     struct semid_ds semid_ds;
3872     int i, ret;
3873 
3874     semun.buf = &semid_ds;
3875 
3876     ret = semctl(semid, 0, IPC_STAT, semun);
3877     if (ret == -1)
3878         return get_errno(ret);
3879 
3880     nsems = semid_ds.sem_nsems;
3881 
3882     *host_array = g_try_new(unsigned short, nsems);
3883     if (!*host_array) {
3884         return -TARGET_ENOMEM;
3885     }
3886     array = lock_user(VERIFY_READ, target_addr,
3887                       nsems*sizeof(unsigned short), 1);
3888     if (!array) {
3889         g_free(*host_array);
3890         return -TARGET_EFAULT;
3891     }
3892 
3893     for(i=0; i<nsems; i++) {
3894         __get_user((*host_array)[i], &array[i]);
3895     }
3896     unlock_user(array, target_addr, 0);
3897 
3898     return 0;
3899 }
3900 
3901 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3902                                                unsigned short **host_array)
3903 {
3904     int nsems;
3905     unsigned short *array;
3906     union semun semun;
3907     struct semid_ds semid_ds;
3908     int i, ret;
3909 
3910     semun.buf = &semid_ds;
3911 
3912     ret = semctl(semid, 0, IPC_STAT, semun);
3913     if (ret == -1)
3914         return get_errno(ret);
3915 
3916     nsems = semid_ds.sem_nsems;
3917 
3918     array = lock_user(VERIFY_WRITE, target_addr,
3919                       nsems*sizeof(unsigned short), 0);
3920     if (!array)
3921         return -TARGET_EFAULT;
3922 
3923     for(i=0; i<nsems; i++) {
3924         __put_user((*host_array)[i], &array[i]);
3925     }
3926     g_free(*host_array);
3927     unlock_user(array, target_addr, 1);
3928 
3929     return 0;
3930 }
3931 
3932 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3933                                  abi_ulong target_arg)
3934 {
3935     union target_semun target_su = { .buf = target_arg };
3936     union semun arg;
3937     struct semid_ds dsarg;
3938     unsigned short *array = NULL;
3939     struct seminfo seminfo;
3940     abi_long ret = -TARGET_EINVAL;
3941     abi_long err;
3942     cmd &= 0xff;
3943 
3944     switch (cmd) {
3945         case GETVAL:
3946         case SETVAL:
3947             /* In 64-bit cross-endian situations, we will erroneously pick up
3948              * the wrong half of the union for the "val" element.  To rectify
3949              * this, the entire 8-byte structure is byteswapped, followed by
3950              * a swap of the 4-byte val field. In other cases, the data is
3951              * already in proper host byte order. */
3952             if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3953                 target_su.buf = tswapal(target_su.buf);
3954                 arg.val = tswap32(target_su.val);
3955             } else {
3956                 arg.val = target_su.val;
3957             }
3958             ret = get_errno(semctl(semid, semnum, cmd, arg));
3959             break;
3960         case GETALL:
3961         case SETALL:
3962             err = target_to_host_semarray(semid, &array, target_su.array);
3963             if (err)
3964                 return err;
3965             arg.array = array;
3966             ret = get_errno(semctl(semid, semnum, cmd, arg));
3967             err = host_to_target_semarray(semid, target_su.array, &array);
3968             if (err)
3969                 return err;
3970             break;
3971         case IPC_STAT:
3972         case IPC_SET:
3973         case SEM_STAT:
3974             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3975             if (err)
3976                 return err;
3977             arg.buf = &dsarg;
3978             ret = get_errno(semctl(semid, semnum, cmd, arg));
3979             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3980             if (err)
3981                 return err;
3982             break;
3983         case IPC_INFO:
3984         case SEM_INFO:
3985             arg.__buf = &seminfo;
3986             ret = get_errno(semctl(semid, semnum, cmd, arg));
3987             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3988             if (err)
3989                 return err;
3990             break;
3991         case IPC_RMID:
3992         case GETPID:
3993         case GETNCNT:
3994         case GETZCNT:
3995             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3996             break;
3997     }
3998 
3999     return ret;
4000 }
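
/*
 * Worked example (editor's note): the GETVAL/SETVAL byte-swapping above.
 * For a hypothetical 64-bit big-endian guest running on a little-endian
 * host, the guest's 4-byte "val" occupies one half of the 8-byte
 * target_semun, so the code swaps the whole 8-byte "buf" view first and
 * then the 4-byte "val" view; the two swaps together select the correct
 * half and leave it in host byte order:
 *
 *     target_su.buf = tswapal(target_su.buf);   // 8-byte swap
 *     arg.val = tswap32(target_su.val);         // 4-byte swap of "val"
 */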
4001 
4002 struct target_sembuf {
4003     unsigned short sem_num;
4004     short sem_op;
4005     short sem_flg;
4006 };
4007 
4008 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4009                                              abi_ulong target_addr,
4010                                              unsigned nsops)
4011 {
4012     struct target_sembuf *target_sembuf;
4013     int i;
4014 
4015     target_sembuf = lock_user(VERIFY_READ, target_addr,
4016                               nsops*sizeof(struct target_sembuf), 1);
4017     if (!target_sembuf)
4018         return -TARGET_EFAULT;
4019 
4020     for(i=0; i<nsops; i++) {
4021         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4022         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4023         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4024     }
4025 
4026     unlock_user(target_sembuf, target_addr, 0);
4027 
4028     return 0;
4029 }
4030 
4031 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4032     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4033 
4034 /*
4035  * This macro is required to handle the s390 variants, which pass the
4036  * arguments in a different order than the default.
4037  */
4038 #ifdef __s390x__
4039 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4040   (__nsops), (__timeout), (__sops)
4041 #else
4042 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4043   (__nsops), 0, (__sops), (__timeout)
4044 #endif
4045 
4046 static inline abi_long do_semtimedop(int semid,
4047                                      abi_long ptr,
4048                                      unsigned nsops,
4049                                      abi_long timeout, bool time64)
4050 {
4051     struct sembuf *sops;
4052     struct timespec ts, *pts = NULL;
4053     abi_long ret;
4054 
4055     if (timeout) {
4056         pts = &ts;
4057         if (time64) {
4058             if (target_to_host_timespec64(pts, timeout)) {
4059                 return -TARGET_EFAULT;
4060             }
4061         } else {
4062             if (target_to_host_timespec(pts, timeout)) {
4063                 return -TARGET_EFAULT;
4064             }
4065         }
4066     }
4067 
4068     if (nsops > TARGET_SEMOPM) {
4069         return -TARGET_E2BIG;
4070     }
4071 
4072     sops = g_new(struct sembuf, nsops);
4073 
4074     if (target_to_host_sembuf(sops, ptr, nsops)) {
4075         g_free(sops);
4076         return -TARGET_EFAULT;
4077     }
4078 
4079     ret = -TARGET_ENOSYS;
4080 #ifdef __NR_semtimedop
4081     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4082 #endif
4083 #ifdef __NR_ipc
4084     if (ret == -TARGET_ENOSYS) {
4085         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4086                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4087     }
4088 #endif
4089     g_free(sops);
4090     return ret;
4091 }
4092 #endif
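
/*
 * Illustrative sketch (editor's note): what the SEMTIMEDOP_IPC_ARGS macro
 * expands to in the ipc() fallback of do_semtimedop(). On most hosts the
 * call becomes
 *
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 *
 * while on s390x, whose sys_ipc takes only five arguments, it becomes
 *
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 */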
4093 
4094 struct target_msqid_ds
4095 {
4096     struct target_ipc_perm msg_perm;
4097     abi_ulong msg_stime;
4098 #if TARGET_ABI_BITS == 32
4099     abi_ulong __unused1;
4100 #endif
4101     abi_ulong msg_rtime;
4102 #if TARGET_ABI_BITS == 32
4103     abi_ulong __unused2;
4104 #endif
4105     abi_ulong msg_ctime;
4106 #if TARGET_ABI_BITS == 32
4107     abi_ulong __unused3;
4108 #endif
4109     abi_ulong __msg_cbytes;
4110     abi_ulong msg_qnum;
4111     abi_ulong msg_qbytes;
4112     abi_ulong msg_lspid;
4113     abi_ulong msg_lrpid;
4114     abi_ulong __unused4;
4115     abi_ulong __unused5;
4116 };
4117 
4118 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4119                                                abi_ulong target_addr)
4120 {
4121     struct target_msqid_ds *target_md;
4122 
4123     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4124         return -TARGET_EFAULT;
4125     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4126         return -TARGET_EFAULT;
4127     host_md->msg_stime = tswapal(target_md->msg_stime);
4128     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4129     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4130     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4131     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4132     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4133     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4134     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4135     unlock_user_struct(target_md, target_addr, 0);
4136     return 0;
4137 }
4138 
4139 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4140                                                struct msqid_ds *host_md)
4141 {
4142     struct target_msqid_ds *target_md;
4143 
4144     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4145         return -TARGET_EFAULT;
4146     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4147         return -TARGET_EFAULT;
4148     target_md->msg_stime = tswapal(host_md->msg_stime);
4149     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4150     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4151     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4152     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4153     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4154     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4155     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4156     unlock_user_struct(target_md, target_addr, 1);
4157     return 0;
4158 }
4159 
4160 struct target_msginfo {
4161     int msgpool;
4162     int msgmap;
4163     int msgmax;
4164     int msgmnb;
4165     int msgmni;
4166     int msgssz;
4167     int msgtql;
4168     unsigned short int msgseg;
4169 };
4170 
4171 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4172                                               struct msginfo *host_msginfo)
4173 {
4174     struct target_msginfo *target_msginfo;
4175     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4176         return -TARGET_EFAULT;
4177     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4178     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4179     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4180     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4181     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4182     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4183     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4184     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4185     unlock_user_struct(target_msginfo, target_addr, 1);
4186     return 0;
4187 }
4188 
4189 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4190 {
4191     struct msqid_ds dsarg;
4192     struct msginfo msginfo;
4193     abi_long ret = -TARGET_EINVAL;
4194 
4195     cmd &= 0xff;
4196 
4197     switch (cmd) {
4198     case IPC_STAT:
4199     case IPC_SET:
4200     case MSG_STAT:
4201         if (target_to_host_msqid_ds(&dsarg,ptr))
4202             return -TARGET_EFAULT;
4203         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4204         if (host_to_target_msqid_ds(ptr,&dsarg))
4205             return -TARGET_EFAULT;
4206         break;
4207     case IPC_RMID:
4208         ret = get_errno(msgctl(msgid, cmd, NULL));
4209         break;
4210     case IPC_INFO:
4211     case MSG_INFO:
4212         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4213         if (host_to_target_msginfo(ptr, &msginfo))
4214             return -TARGET_EFAULT;
4215         break;
4216     }
4217 
4218     return ret;
4219 }
4220 
4221 struct target_msgbuf {
4222     abi_long mtype;
4223     char	mtext[1];
4224 };
4225 
4226 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4227                                  ssize_t msgsz, int msgflg)
4228 {
4229     struct target_msgbuf *target_mb;
4230     struct msgbuf *host_mb;
4231     abi_long ret = 0;
4232 
4233     if (msgsz < 0) {
4234         return -TARGET_EINVAL;
4235     }
4236 
4237     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4238         return -TARGET_EFAULT;
4239     host_mb = g_try_malloc(msgsz + sizeof(long));
4240     if (!host_mb) {
4241         unlock_user_struct(target_mb, msgp, 0);
4242         return -TARGET_ENOMEM;
4243     }
4244     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4245     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4246     ret = -TARGET_ENOSYS;
4247 #ifdef __NR_msgsnd
4248     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4249 #endif
4250 #ifdef __NR_ipc
4251     if (ret == -TARGET_ENOSYS) {
4252 #ifdef __s390x__
4253         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4254                                  host_mb));
4255 #else
4256         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4257                                  host_mb, 0));
4258 #endif
4259     }
4260 #endif
4261     g_free(host_mb);
4262     unlock_user_struct(target_mb, msgp, 0);
4263 
4264     return ret;
4265 }
4266 
4267 #ifdef __NR_ipc
4268 #if defined(__sparc__)
4269 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4270 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4271 #elif defined(__s390x__)
4272 /* The s390 sys_ipc variant has only five parameters.  */
4273 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4274     ((long int[]){(long int)__msgp, __msgtyp})
4275 #else
4276 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4277     ((long int[]){(long int)__msgp, __msgtyp}), 0
4278 #endif
4279 #endif
4280 
4281 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4282                                  ssize_t msgsz, abi_long msgtyp,
4283                                  int msgflg)
4284 {
4285     struct target_msgbuf *target_mb;
4286     char *target_mtext;
4287     struct msgbuf *host_mb;
4288     abi_long ret = 0;
4289 
4290     if (msgsz < 0) {
4291         return -TARGET_EINVAL;
4292     }
4293 
4294     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4295         return -TARGET_EFAULT;
4296 
4297     host_mb = g_try_malloc(msgsz + sizeof(long));
4298     if (!host_mb) {
4299         ret = -TARGET_ENOMEM;
4300         goto end;
4301     }
4302     ret = -TARGET_ENOSYS;
4303 #ifdef __NR_msgrcv
4304     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4305 #endif
4306 #ifdef __NR_ipc
4307     if (ret == -TARGET_ENOSYS) {
4308         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4309                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4310     }
4311 #endif
4312 
4313     if (ret > 0) {
4314         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4315         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4316         if (!target_mtext) {
4317             ret = -TARGET_EFAULT;
4318             goto end;
4319         }
4320         memcpy(target_mb->mtext, host_mb->mtext, ret);
4321         unlock_user(target_mtext, target_mtext_addr, ret);
4322     }
4323 
4324     target_mb->mtype = tswapal(host_mb->mtype);
4325 
4326 end:
4327     if (target_mb)
4328         unlock_user_struct(target_mb, msgp, 1);
4329     g_free(host_mb);
4330     return ret;
4331 }
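
/*
 * Illustrative sketch (editor's note): the guest buffer layout assumed
 * by do_msgsnd()/do_msgrcv() above. The guest supplies a target_msgbuf,
 * i.e. an abi_long mtype immediately followed by the message text:
 *
 *     guest memory at msgp:  [ mtype ][ mtext[0] ... mtext[msgsz-1] ]
 *
 * On a successful msgrcv the text is written back starting at
 * msgp + sizeof(abi_ulong) and mtype is byte-swapped separately.
 */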
4332 
4333 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4334                                                abi_ulong target_addr)
4335 {
4336     struct target_shmid_ds *target_sd;
4337 
4338     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4339         return -TARGET_EFAULT;
4340     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4341         return -TARGET_EFAULT;
4342     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4343     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4344     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4345     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4346     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4347     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4348     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4349     unlock_user_struct(target_sd, target_addr, 0);
4350     return 0;
4351 }
4352 
4353 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4354                                                struct shmid_ds *host_sd)
4355 {
4356     struct target_shmid_ds *target_sd;
4357 
4358     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4359         return -TARGET_EFAULT;
4360     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4361         return -TARGET_EFAULT;
4362     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4363     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4364     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4365     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4366     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4367     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4368     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4369     unlock_user_struct(target_sd, target_addr, 1);
4370     return 0;
4371 }
4372 
4373 struct  target_shminfo {
4374     abi_ulong shmmax;
4375     abi_ulong shmmin;
4376     abi_ulong shmmni;
4377     abi_ulong shmseg;
4378     abi_ulong shmall;
4379 };
4380 
4381 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4382                                               struct shminfo *host_shminfo)
4383 {
4384     struct target_shminfo *target_shminfo;
4385     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4386         return -TARGET_EFAULT;
4387     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4388     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4389     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4390     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4391     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4392     unlock_user_struct(target_shminfo, target_addr, 1);
4393     return 0;
4394 }
4395 
4396 struct target_shm_info {
4397     int used_ids;
4398     abi_ulong shm_tot;
4399     abi_ulong shm_rss;
4400     abi_ulong shm_swp;
4401     abi_ulong swap_attempts;
4402     abi_ulong swap_successes;
4403 };
4404 
4405 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4406                                                struct shm_info *host_shm_info)
4407 {
4408     struct target_shm_info *target_shm_info;
4409     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4410         return -TARGET_EFAULT;
4411     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4412     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4413     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4414     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4415     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4416     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4417     unlock_user_struct(target_shm_info, target_addr, 1);
4418     return 0;
4419 }
4420 
4421 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4422 {
4423     struct shmid_ds dsarg;
4424     struct shminfo shminfo;
4425     struct shm_info shm_info;
4426     abi_long ret = -TARGET_EINVAL;
4427 
4428     cmd &= 0xff;
4429 
4430     switch(cmd) {
4431     case IPC_STAT:
4432     case IPC_SET:
4433     case SHM_STAT:
4434         if (target_to_host_shmid_ds(&dsarg, buf))
4435             return -TARGET_EFAULT;
4436         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4437         if (host_to_target_shmid_ds(buf, &dsarg))
4438             return -TARGET_EFAULT;
4439         break;
4440     case IPC_INFO:
4441         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4442         if (host_to_target_shminfo(buf, &shminfo))
4443             return -TARGET_EFAULT;
4444         break;
4445     case SHM_INFO:
4446         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4447         if (host_to_target_shm_info(buf, &shm_info))
4448             return -TARGET_EFAULT;
4449         break;
4450     case IPC_RMID:
4451     case SHM_LOCK:
4452     case SHM_UNLOCK:
4453         ret = get_errno(shmctl(shmid, cmd, NULL));
4454         break;
4455     }
4456 
4457     return ret;
4458 }
4459 
4460 #ifndef TARGET_FORCE_SHMLBA
4461 /* For most architectures, SHMLBA is the same as the page size;
4462  * some architectures have larger values, in which case they should
4463  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4464  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4465  * and defining its own value for SHMLBA.
4466  *
4467  * The kernel also permits SHMLBA to be set by the architecture to a
4468  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4469  * this means that addresses are rounded to the large size if
4470  * SHM_RND is set but addresses not aligned to that size are not rejected
4471  * as long as they are at least page-aligned. Since the only architecture
4472  * which uses this is ia64 this code doesn't provide for that oddity.
4473  * which uses this is ia64, this code doesn't provide for that oddity.
4474 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4475 {
4476     return TARGET_PAGE_SIZE;
4477 }
4478 #endif
4479 
4480 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4481                                  int shmid, abi_ulong shmaddr, int shmflg)
4482 {
4483     CPUState *cpu = env_cpu(cpu_env);
4484     abi_long raddr;
4485     void *host_raddr;
4486     struct shmid_ds shm_info;
4487     int i,ret;
4488     abi_ulong shmlba;
4489 
4490     /* shmat pointers are always untagged */
4491 
4492     /* find out the length of the shared memory segment */
4493     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4494     if (is_error(ret)) {
4495         /* can't get length, bail out */
4496         return ret;
4497     }
4498 
4499     shmlba = target_shmlba(cpu_env);
4500 
4501     if (shmaddr & (shmlba - 1)) {
4502         if (shmflg & SHM_RND) {
4503             shmaddr &= ~(shmlba - 1);
4504         } else {
4505             return -TARGET_EINVAL;
4506         }
4507     }
4508     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4509         return -TARGET_EINVAL;
4510     }
4511 
4512     mmap_lock();
4513 
4514     /*
4515      * We're mapping shared memory, so ensure we generate code for parallel
4516      * execution and flush old translations.  This will work up to the level
4517      * supported by the host -- anything that requires EXCP_ATOMIC will not
4518      * be atomic with respect to an external process.
4519      */
4520     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4521         cpu->tcg_cflags |= CF_PARALLEL;
4522         tb_flush(cpu);
4523     }
4524 
4525     if (shmaddr)
4526         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4527     else {
4528         abi_ulong mmap_start;
4529 
4530         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4531         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4532 
4533         if (mmap_start == -1) {
4534             errno = ENOMEM;
4535             host_raddr = (void *)-1;
4536         } else
4537             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4538                                shmflg | SHM_REMAP);
4539     }
4540 
4541     if (host_raddr == (void *)-1) {
4542         mmap_unlock();
4543         return get_errno((long)host_raddr);
4544     }
4545     raddr=h2g((unsigned long)host_raddr);
4546 
4547     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4548                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4549                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4550 
4551     for (i = 0; i < N_SHM_REGIONS; i++) {
4552         if (!shm_regions[i].in_use) {
4553             shm_regions[i].in_use = true;
4554             shm_regions[i].start = raddr;
4555             shm_regions[i].size = shm_info.shm_segsz;
4556             break;
4557         }
4558     }
4559 
4560     mmap_unlock();
4561     return raddr;
4562 
4563 }
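
/*
 * Worked example (editor's note): the SHM_RND handling in do_shmat()
 * above, with a hypothetical shmlba of 0x1000 and a guest request of
 * shmaddr == 0x40001234:
 *
 *     shmaddr & (shmlba - 1)  == 0x234          // misaligned
 *     with SHM_RND:    shmaddr &= ~(shmlba - 1)  ->  0x40001000 is used
 *     without SHM_RND:                           ->  -TARGET_EINVAL
 */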
4564 
4565 static inline abi_long do_shmdt(abi_ulong shmaddr)
4566 {
4567     int i;
4568     abi_long rv;
4569 
4570     /* shmdt pointers are always untagged */
4571 
4572     mmap_lock();
4573 
4574     for (i = 0; i < N_SHM_REGIONS; ++i) {
4575         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4576             shm_regions[i].in_use = false;
4577             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4578             break;
4579         }
4580     }
4581     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4582 
4583     mmap_unlock();
4584 
4585     return rv;
4586 }
4587 
4588 #ifdef TARGET_NR_ipc
4589 /* ??? This only works with linear mappings.  */
4590 /* do_ipc() must return target values and target errnos. */
4591 static abi_long do_ipc(CPUArchState *cpu_env,
4592                        unsigned int call, abi_long first,
4593                        abi_long second, abi_long third,
4594                        abi_long ptr, abi_long fifth)
4595 {
4596     int version;
4597     abi_long ret = 0;
4598 
4599     version = call >> 16;
4600     call &= 0xffff;
4601 
4602     switch (call) {
4603     case IPCOP_semop:
4604         ret = do_semtimedop(first, ptr, second, 0, false);
4605         break;
4606     case IPCOP_semtimedop:
4607     /*
4608      * The s390 sys_ipc variant has only five parameters instead of six
4609      * (as in the default variant), and the only difference is the handling of
4610      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4611      * to a struct timespec while the generic variant uses the fifth parameter.
4612      */
4613 #if defined(TARGET_S390X)
4614         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4615 #else
4616         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4617 #endif
4618         break;
4619 
4620     case IPCOP_semget:
4621         ret = get_errno(semget(first, second, third));
4622         break;
4623 
4624     case IPCOP_semctl: {
4625         /* The semun argument to semctl is passed by value, so dereference the
4626          * ptr argument. */
4627         abi_ulong atptr;
4628         get_user_ual(atptr, ptr);
4629         ret = do_semctl(first, second, third, atptr);
4630         break;
4631     }
4632 
4633     case IPCOP_msgget:
4634         ret = get_errno(msgget(first, second));
4635         break;
4636 
4637     case IPCOP_msgsnd:
4638         ret = do_msgsnd(first, ptr, second, third);
4639         break;
4640 
4641     case IPCOP_msgctl:
4642         ret = do_msgctl(first, second, ptr);
4643         break;
4644 
4645     case IPCOP_msgrcv:
4646         switch (version) {
4647         case 0:
4648             {
4649                 struct target_ipc_kludge {
4650                     abi_long msgp;
4651                     abi_long msgtyp;
4652                 } *tmp;
4653 
4654                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4655                     ret = -TARGET_EFAULT;
4656                     break;
4657                 }
4658 
4659                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4660 
4661                 unlock_user_struct(tmp, ptr, 0);
4662                 break;
4663             }
4664         default:
4665             ret = do_msgrcv(first, ptr, second, fifth, third);
4666         }
4667         break;
4668 
4669     case IPCOP_shmat:
4670         switch (version) {
4671         default:
4672         {
4673             abi_ulong raddr;
4674             raddr = do_shmat(cpu_env, first, ptr, second);
4675             if (is_error(raddr))
4676                 return get_errno(raddr);
4677             if (put_user_ual(raddr, third))
4678                 return -TARGET_EFAULT;
4679             break;
4680         }
4681         case 1:
4682             ret = -TARGET_EINVAL;
4683             break;
4684         }
4685         break;
4686     case IPCOP_shmdt:
4687         ret = do_shmdt(ptr);
4688         break;
4689 
4690     case IPCOP_shmget:
4691         /* IPC_* flag values are the same on all Linux platforms */
4692         ret = get_errno(shmget(first, second, third));
4693         break;
4694 
4695     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4696     case IPCOP_shmctl:
4697         ret = do_shmctl(first, second, ptr);
4698         break;
4699     default:
4700         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4701                       call, version);
4702         ret = -TARGET_ENOSYS;
4703         break;
4704     }
4705     return ret;
4706 }
4707 #endif
4708 
4709 /* kernel structure types definitions */
4710 
4711 #define STRUCT(name, ...) STRUCT_ ## name,
4712 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4713 enum {
4714 #include "syscall_types.h"
4715 STRUCT_MAX
4716 };
4717 #undef STRUCT
4718 #undef STRUCT_SPECIAL
4719 
4720 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4721 #define STRUCT_SPECIAL(name)
4722 #include "syscall_types.h"
4723 #undef STRUCT
4724 #undef STRUCT_SPECIAL
4725 
4726 #define MAX_STRUCT_SIZE 4096
4727 
4728 #ifdef CONFIG_FIEMAP
4729 /* So fiemap access checks don't overflow on 32 bit systems.
4730  * This is very slightly smaller than the limit imposed by
4731  * the underlying kernel.
4732  */
4733 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4734                             / sizeof(struct fiemap_extent))
4735 
4736 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4737                                        int fd, int cmd, abi_long arg)
4738 {
4739     /* The parameter for this ioctl is a struct fiemap followed
4740      * by an array of struct fiemap_extent whose size is set
4741      * in fiemap->fm_extent_count. The array is filled in by the
4742      * ioctl.
4743      */
4744     int target_size_in, target_size_out;
4745     struct fiemap *fm;
4746     const argtype *arg_type = ie->arg_type;
4747     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4748     void *argptr, *p;
4749     abi_long ret;
4750     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4751     uint32_t outbufsz;
4752     int free_fm = 0;
4753 
4754     assert(arg_type[0] == TYPE_PTR);
4755     assert(ie->access == IOC_RW);
4756     arg_type++;
4757     target_size_in = thunk_type_size(arg_type, 0);
4758     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4759     if (!argptr) {
4760         return -TARGET_EFAULT;
4761     }
4762     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4763     unlock_user(argptr, arg, 0);
4764     fm = (struct fiemap *)buf_temp;
4765     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4766         return -TARGET_EINVAL;
4767     }
4768 
4769     outbufsz = sizeof (*fm) +
4770         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4771 
4772     if (outbufsz > MAX_STRUCT_SIZE) {
4773         /* We can't fit all the extents into the fixed size buffer.
4774          * Allocate one that is large enough and use it instead.
4775          */
4776         fm = g_try_malloc(outbufsz);
4777         if (!fm) {
4778             return -TARGET_ENOMEM;
4779         }
4780         memcpy(fm, buf_temp, sizeof(struct fiemap));
4781         free_fm = 1;
4782     }
4783     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4784     if (!is_error(ret)) {
4785         target_size_out = target_size_in;
4786         /* An extent_count of 0 means we were only counting the extents
4787          * so there are no structs to copy
4788          */
4789         if (fm->fm_extent_count != 0) {
4790             target_size_out += fm->fm_mapped_extents * extent_size;
4791         }
4792         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4793         if (!argptr) {
4794             ret = -TARGET_EFAULT;
4795         } else {
4796             /* Convert the struct fiemap */
4797             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4798             if (fm->fm_extent_count != 0) {
4799                 p = argptr + target_size_in;
4800                 /* ...and then all the struct fiemap_extents */
4801                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4802                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4803                                   THUNK_TARGET);
4804                     p += extent_size;
4805                 }
4806             }
4807             unlock_user(argptr, arg, target_size_out);
4808         }
4809     }
4810     if (free_fm) {
4811         g_free(fm);
4812     }
4813     return ret;
4814 }
4815 #endif
4816 
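/*
 * SIOCGIFCONF: the ifc_buf pointer inside struct ifconf refers to an
 * array of struct ifreq whose size differs between target and host, so
 * build a host-sized buffer, issue the ioctl, then convert both the
 * ifconf header and each ifreq entry back to the target layout.
 */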
4817 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4818                                 int fd, int cmd, abi_long arg)
4819 {
4820     const argtype *arg_type = ie->arg_type;
4821     int target_size;
4822     void *argptr;
4823     int ret;
4824     struct ifconf *host_ifconf;
4825     uint32_t outbufsz;
4826     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4827     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4828     int target_ifreq_size;
4829     int nb_ifreq;
4830     int free_buf = 0;
4831     int i;
4832     int target_ifc_len;
4833     abi_long target_ifc_buf;
4834     int host_ifc_len;
4835     char *host_ifc_buf;
4836 
4837     assert(arg_type[0] == TYPE_PTR);
4838     assert(ie->access == IOC_RW);
4839 
4840     arg_type++;
4841     target_size = thunk_type_size(arg_type, 0);
4842 
4843     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4844     if (!argptr)
4845         return -TARGET_EFAULT;
4846     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4847     unlock_user(argptr, arg, 0);
4848 
4849     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4850     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4851     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4852 
4853     if (target_ifc_buf != 0) {
4854         target_ifc_len = host_ifconf->ifc_len;
4855         nb_ifreq = target_ifc_len / target_ifreq_size;
4856         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4857 
4858         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4859         if (outbufsz > MAX_STRUCT_SIZE) {
4860             /*
4861              * We can't fit all the ifreq entries into the fixed size buffer.
4862              * Allocate one that is large enough and use it instead.
4863              */
4864             host_ifconf = malloc(outbufsz);
4865             if (!host_ifconf) {
4866                 return -TARGET_ENOMEM;
4867             }
4868             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4869             free_buf = 1;
4870         }
4871         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4872 
4873         host_ifconf->ifc_len = host_ifc_len;
4874     } else {
4875         host_ifc_buf = NULL;
4876     }
4877     host_ifconf->ifc_buf = host_ifc_buf;
4878 
4879     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4880     if (!is_error(ret)) {
4881         /* convert host ifc_len to target ifc_len */
4882 
4883         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4884         target_ifc_len = nb_ifreq * target_ifreq_size;
4885         host_ifconf->ifc_len = target_ifc_len;
4886 
4887         /* restore target ifc_buf */
4888 
4889         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4890 
4891         /* copy struct ifconf to target user */
4892 
4893         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4894         if (!argptr)
4895             return -TARGET_EFAULT;
4896         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4897         unlock_user(argptr, arg, target_size);
4898 
4899         if (target_ifc_buf != 0) {
4900             /* copy ifreq[] to target user */
4901             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4902             for (i = 0; i < nb_ifreq ; i++) {
4903                 thunk_convert(argptr + i * target_ifreq_size,
4904                               host_ifc_buf + i * sizeof(struct ifreq),
4905                               ifreq_arg_type, THUNK_TARGET);
4906             }
4907             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4908         }
4909     }
4910 
4911     if (free_buf) {
4912         free(host_ifconf);
4913     }
4914 
4915     return ret;
4916 }
4917 
4918 #if defined(CONFIG_USBFS)
4919 #if HOST_LONG_BITS > 64
4920 #error USBDEVFS thunks do not support >64 bit hosts yet.
4921 #endif
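/*
 * Bookkeeping for an asynchronous USB request: remembers the guest
 * addresses of the URB and its data buffer alongside the host-side
 * struct usbdevfs_urb that is actually handed to the kernel.
 */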
4922 struct live_urb {
4923     uint64_t target_urb_adr;
4924     uint64_t target_buf_adr;
4925     char *target_buf_ptr;
4926     struct usbdevfs_urb host_urb;
4927 };
4928 
4929 static GHashTable *usbdevfs_urb_hashtable(void)
4930 {
4931     static GHashTable *urb_hashtable;
4932 
4933     if (!urb_hashtable) {
4934         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4935     }
4936     return urb_hashtable;
4937 }
4938 
4939 static void urb_hashtable_insert(struct live_urb *urb)
4940 {
4941     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4942     g_hash_table_insert(urb_hashtable, urb, urb);
4943 }
4944 
4945 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4946 {
4947     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4948     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4949 }
4950 
4951 static void urb_hashtable_remove(struct live_urb *urb)
4952 {
4953     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4954     g_hash_table_remove(urb_hashtable, urb);
4955 }
4956 
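/*
 * Reap a completed URB: the kernel hands back the host URB pointer we
 * submitted earlier, so recover the owning struct live_urb, copy the
 * results into the guest URB and data buffer, and store the guest URB
 * address through the caller's pointer argument.
 */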
4957 static abi_long
4958 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4959                           int fd, int cmd, abi_long arg)
4960 {
4961     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4962     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4963     struct live_urb *lurb;
4964     void *argptr;
4965     uint64_t hurb;
4966     int target_size;
4967     uintptr_t target_urb_adr;
4968     abi_long ret;
4969 
4970     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4971 
4972     memset(buf_temp, 0, sizeof(uint64_t));
4973     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4974     if (is_error(ret)) {
4975         return ret;
4976     }
4977 
4978     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4979     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4980     if (!lurb->target_urb_adr) {
4981         return -TARGET_EFAULT;
4982     }
4983     urb_hashtable_remove(lurb);
4984     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4985         lurb->host_urb.buffer_length);
4986     lurb->target_buf_ptr = NULL;
4987 
4988     /* restore the guest buffer pointer */
4989     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4990 
4991     /* update the guest urb struct */
4992     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4993     if (!argptr) {
4994         g_free(lurb);
4995         return -TARGET_EFAULT;
4996     }
4997     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4998     unlock_user(argptr, lurb->target_urb_adr, target_size);
4999 
5000     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5001     /* write back the urb handle */
5002     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5003     if (!argptr) {
5004         g_free(lurb);
5005         return -TARGET_EFAULT;
5006     }
5007 
5008     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5009     target_urb_adr = lurb->target_urb_adr;
5010     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5011     unlock_user(argptr, arg, target_size);
5012 
5013     g_free(lurb);
5014     return ret;
5015 }
5016 
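/* Cancel a previously submitted URB, identified by its guest address. */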
5017 static abi_long
5018 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5019                              uint8_t *buf_temp __attribute__((unused)),
5020                              int fd, int cmd, abi_long arg)
5021 {
5022     struct live_urb *lurb;
5023 
5024     /* map target address back to host URB with metadata. */
5025     lurb = urb_hashtable_lookup(arg);
5026     if (!lurb) {
5027         return -TARGET_EFAULT;
5028     }
5029     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5030 }
5031 
5032 static abi_long
5033 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5034                             int fd, int cmd, abi_long arg)
5035 {
5036     const argtype *arg_type = ie->arg_type;
5037     int target_size;
5038     abi_long ret;
5039     void *argptr;
5040     int rw_dir;
5041     struct live_urb *lurb;
5042 
5043     /*
5044      * Each submitted URB needs to map to a unique ID for the
5045      * kernel, and that unique ID needs to be a pointer to
5046      * host memory.  Hence, we need to malloc for each URB.
5047      * Isochronous transfers have a variable length struct.
5048      */
5049     arg_type++;
5050     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5051 
5052     /* construct host copy of urb and metadata */
5053     lurb = g_try_malloc0(sizeof(struct live_urb));
5054     if (!lurb) {
5055         return -TARGET_ENOMEM;
5056     }
5057 
5058     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5059     if (!argptr) {
5060         g_free(lurb);
5061         return -TARGET_EFAULT;
5062     }
5063     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5064     unlock_user(argptr, arg, 0);
5065 
5066     lurb->target_urb_adr = arg;
5067     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5068 
5069     /* buffer space used depends on endpoint type so lock the entire buffer */
5070     /* control type urbs should check the buffer contents for true direction */
5071     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5072     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5073         lurb->host_urb.buffer_length, 1);
5074     if (lurb->target_buf_ptr == NULL) {
5075         g_free(lurb);
5076         return -TARGET_EFAULT;
5077     }
5078 
5079     /* update buffer pointer in host copy */
5080     lurb->host_urb.buffer = lurb->target_buf_ptr;
5081 
5082     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5083     if (is_error(ret)) {
5084         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5085         g_free(lurb);
5086     } else {
5087         urb_hashtable_insert(lurb);
5088     }
5089 
5090     return ret;
5091 }
5092 #endif /* CONFIG_USBFS */
5093 
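/*
 * Device-mapper ioctls: a struct dm_ioctl header is followed by a
 * variable-size, command-specific payload at data_start, so the header
 * and the payload are converted separately in both directions.
 */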
5094 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5095                             int cmd, abi_long arg)
5096 {
5097     void *argptr;
5098     struct dm_ioctl *host_dm;
5099     abi_long guest_data;
5100     uint32_t guest_data_size;
5101     int target_size;
5102     const argtype *arg_type = ie->arg_type;
5103     abi_long ret;
5104     void *big_buf = NULL;
5105     char *host_data;
5106 
5107     arg_type++;
5108     target_size = thunk_type_size(arg_type, 0);
5109     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5110     if (!argptr) {
5111         ret = -TARGET_EFAULT;
5112         goto out;
5113     }
5114     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5115     unlock_user(argptr, arg, 0);
5116 
5117     /* buf_temp is too small, so fetch things into a bigger buffer */
5118     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5119     memcpy(big_buf, buf_temp, target_size);
5120     buf_temp = big_buf;
5121     host_dm = big_buf;
5122 
5123     guest_data = arg + host_dm->data_start;
5124     if ((guest_data - arg) < 0) {
5125         ret = -TARGET_EINVAL;
5126         goto out;
5127     }
5128     guest_data_size = host_dm->data_size - host_dm->data_start;
5129     host_data = (char*)host_dm + host_dm->data_start;
5130 
5131     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5132     if (!argptr) {
5133         ret = -TARGET_EFAULT;
5134         goto out;
5135     }
5136 
5137     switch (ie->host_cmd) {
5138     case DM_REMOVE_ALL:
5139     case DM_LIST_DEVICES:
5140     case DM_DEV_CREATE:
5141     case DM_DEV_REMOVE:
5142     case DM_DEV_SUSPEND:
5143     case DM_DEV_STATUS:
5144     case DM_DEV_WAIT:
5145     case DM_TABLE_STATUS:
5146     case DM_TABLE_CLEAR:
5147     case DM_TABLE_DEPS:
5148     case DM_LIST_VERSIONS:
5149         /* no input data */
5150         break;
5151     case DM_DEV_RENAME:
5152     case DM_DEV_SET_GEOMETRY:
5153         /* data contains only strings */
5154         memcpy(host_data, argptr, guest_data_size);
5155         break;
5156     case DM_TARGET_MSG:
5157         memcpy(host_data, argptr, guest_data_size);
5158         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5159         break;
5160     case DM_TABLE_LOAD:
5161     {
5162         void *gspec = argptr;
5163         void *cur_data = host_data;
5164         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5165         int spec_size = thunk_type_size(arg_type, 0);
5166         int i;
5167 
5168         for (i = 0; i < host_dm->target_count; i++) {
5169             struct dm_target_spec *spec = cur_data;
5170             uint32_t next;
5171             int slen;
5172 
5173             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5174             slen = strlen((char*)gspec + spec_size) + 1;
5175             next = spec->next;
5176             spec->next = sizeof(*spec) + slen;
5177             strcpy((char*)&spec[1], gspec + spec_size);
5178             gspec += next;
5179             cur_data += spec->next;
5180         }
5181         break;
5182     }
5183     default:
5184         ret = -TARGET_EINVAL;
5185         unlock_user(argptr, guest_data, 0);
5186         goto out;
5187     }
5188     unlock_user(argptr, guest_data, 0);
5189 
5190     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5191     if (!is_error(ret)) {
5192         guest_data = arg + host_dm->data_start;
5193         guest_data_size = host_dm->data_size - host_dm->data_start;
5194         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5195         switch (ie->host_cmd) {
5196         case DM_REMOVE_ALL:
5197         case DM_DEV_CREATE:
5198         case DM_DEV_REMOVE:
5199         case DM_DEV_RENAME:
5200         case DM_DEV_SUSPEND:
5201         case DM_DEV_STATUS:
5202         case DM_TABLE_LOAD:
5203         case DM_TABLE_CLEAR:
5204         case DM_TARGET_MSG:
5205         case DM_DEV_SET_GEOMETRY:
5206             /* no return data */
5207             break;
5208         case DM_LIST_DEVICES:
5209         {
5210             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5211             uint32_t remaining_data = guest_data_size;
5212             void *cur_data = argptr;
5213             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5214             int nl_size = 12; /* can't use thunk_size due to alignment */
5215 
5216             while (1) {
5217                 uint32_t next = nl->next;
5218                 if (next) {
5219                     nl->next = nl_size + (strlen(nl->name) + 1);
5220                 }
5221                 if (remaining_data < nl->next) {
5222                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5223                     break;
5224                 }
5225                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5226                 strcpy(cur_data + nl_size, nl->name);
5227                 cur_data += nl->next;
5228                 remaining_data -= nl->next;
5229                 if (!next) {
5230                     break;
5231                 }
5232                 nl = (void*)nl + next;
5233             }
5234             break;
5235         }
5236         case DM_DEV_WAIT:
5237         case DM_TABLE_STATUS:
5238         {
5239             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5240             void *cur_data = argptr;
5241             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5242             int spec_size = thunk_type_size(arg_type, 0);
5243             int i;
5244 
5245             for (i = 0; i < host_dm->target_count; i++) {
5246                 uint32_t next = spec->next;
5247                 int slen = strlen((char*)&spec[1]) + 1;
5248                 spec->next = (cur_data - argptr) + spec_size + slen;
5249                 if (guest_data_size < spec->next) {
5250                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5251                     break;
5252                 }
5253                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5254                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5255                 cur_data = argptr + spec->next;
5256                 spec = (void*)host_dm + host_dm->data_start + next;
5257             }
5258             break;
5259         }
5260         case DM_TABLE_DEPS:
5261         {
5262             void *hdata = (void*)host_dm + host_dm->data_start;
5263             int count = *(uint32_t*)hdata;
5264             uint64_t *hdev = hdata + 8;
5265             uint64_t *gdev = argptr + 8;
5266             int i;
5267 
5268             *(uint32_t*)argptr = tswap32(count);
5269             for (i = 0; i < count; i++) {
5270                 *gdev = tswap64(*hdev);
5271                 gdev++;
5272                 hdev++;
5273             }
5274             break;
5275         }
5276         case DM_LIST_VERSIONS:
5277         {
5278             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5279             uint32_t remaining_data = guest_data_size;
5280             void *cur_data = argptr;
5281             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5282             int vers_size = thunk_type_size(arg_type, 0);
5283 
5284             while (1) {
5285                 uint32_t next = vers->next;
5286                 if (next) {
5287                     vers->next = vers_size + (strlen(vers->name) + 1);
5288                 }
5289                 if (remaining_data < vers->next) {
5290                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5291                     break;
5292                 }
5293                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5294                 strcpy(cur_data + vers_size, vers->name);
5295                 cur_data += vers->next;
5296                 remaining_data -= vers->next;
5297                 if (!next) {
5298                     break;
5299                 }
5300                 vers = (void*)vers + next;
5301             }
5302             break;
5303         }
5304         default:
5305             unlock_user(argptr, guest_data, 0);
5306             ret = -TARGET_EINVAL;
5307             goto out;
5308         }
5309         unlock_user(argptr, guest_data, guest_data_size);
5310 
5311         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5312         if (!argptr) {
5313             ret = -TARGET_EFAULT;
5314             goto out;
5315         }
5316         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5317         unlock_user(argptr, arg, target_size);
5318     }
5319 out:
5320     g_free(big_buf);
5321     return ret;
5322 }
5323 
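/*
 * BLKPG: struct blkpg_ioctl_arg carries a pointer to a struct
 * blkpg_partition payload, which has to be fetched and converted
 * separately before the host ioctl is issued.
 */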
5324 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5325                                int cmd, abi_long arg)
5326 {
5327     void *argptr;
5328     int target_size;
5329     const argtype *arg_type = ie->arg_type;
5330     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5331     abi_long ret;
5332 
5333     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5334     struct blkpg_partition host_part;
5335 
5336     /* Read and convert blkpg */
5337     arg_type++;
5338     target_size = thunk_type_size(arg_type, 0);
5339     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5340     if (!argptr) {
5341         ret = -TARGET_EFAULT;
5342         goto out;
5343     }
5344     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5345     unlock_user(argptr, arg, 0);
5346 
5347     switch (host_blkpg->op) {
5348     case BLKPG_ADD_PARTITION:
5349     case BLKPG_DEL_PARTITION:
5350         /* payload is struct blkpg_partition */
5351         break;
5352     default:
5353         /* Unknown opcode */
5354         ret = -TARGET_EINVAL;
5355         goto out;
5356     }
5357 
5358     /* Read and convert blkpg->data */
5359     arg = (abi_long)(uintptr_t)host_blkpg->data;
5360     target_size = thunk_type_size(part_arg_type, 0);
5361     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5362     if (!argptr) {
5363         ret = -TARGET_EFAULT;
5364         goto out;
5365     }
5366     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5367     unlock_user(argptr, arg, 0);
5368 
5369     /* Swizzle the data pointer to our local copy and call! */
5370     host_blkpg->data = &host_part;
5371     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5372 
5373 out:
5374     return ret;
5375 }
5376 
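/*
 * Route ioctls take a struct rtentry whose rt_dev field is a pointer to
 * a device name string in guest memory; convert the struct field by
 * field so that string can be locked and passed through to the host.
 */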
5377 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5378                                 int fd, int cmd, abi_long arg)
5379 {
5380     const argtype *arg_type = ie->arg_type;
5381     const StructEntry *se;
5382     const argtype *field_types;
5383     const int *dst_offsets, *src_offsets;
5384     int target_size;
5385     void *argptr;
5386     abi_ulong *target_rt_dev_ptr = NULL;
5387     unsigned long *host_rt_dev_ptr = NULL;
5388     abi_long ret;
5389     int i;
5390 
5391     assert(ie->access == IOC_W);
5392     assert(*arg_type == TYPE_PTR);
5393     arg_type++;
5394     assert(*arg_type == TYPE_STRUCT);
5395     target_size = thunk_type_size(arg_type, 0);
5396     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5397     if (!argptr) {
5398         return -TARGET_EFAULT;
5399     }
5400     arg_type++;
5401     assert(*arg_type == (int)STRUCT_rtentry);
5402     se = struct_entries + *arg_type++;
5403     assert(se->convert[0] == NULL);
5404     /* convert struct here to be able to catch rt_dev string */
5405     field_types = se->field_types;
5406     dst_offsets = se->field_offsets[THUNK_HOST];
5407     src_offsets = se->field_offsets[THUNK_TARGET];
5408     for (i = 0; i < se->nb_fields; i++) {
5409         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5410             assert(*field_types == TYPE_PTRVOID);
5411             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5412             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5413             if (*target_rt_dev_ptr != 0) {
5414                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5415                                                   tswapal(*target_rt_dev_ptr));
5416                 if (!*host_rt_dev_ptr) {
5417                     unlock_user(argptr, arg, 0);
5418                     return -TARGET_EFAULT;
5419                 }
5420             } else {
5421                 *host_rt_dev_ptr = 0;
5422             }
5423             field_types++;
5424             continue;
5425         }
5426         field_types = thunk_convert(buf_temp + dst_offsets[i],
5427                                     argptr + src_offsets[i],
5428                                     field_types, THUNK_HOST);
5429     }
5430     unlock_user(argptr, arg, 0);
5431 
5432     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5433 
5434     assert(host_rt_dev_ptr != NULL);
5435     assert(target_rt_dev_ptr != NULL);
5436     if (*host_rt_dev_ptr != 0) {
5437         unlock_user((void *)*host_rt_dev_ptr,
5438                     *target_rt_dev_ptr, 0);
5439     }
5440     return ret;
5441 }
5442 
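/* KDSIGACCEPT: translate the target signal number before passing it on. */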
5443 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5444                                      int fd, int cmd, abi_long arg)
5445 {
5446     int sig = target_to_host_signal(arg);
5447     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5448 }
5449 
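/*
 * SIOCGSTAMP: fetch the timestamp from the host as a struct timeval and
 * copy it out in either the old timeval layout or the new 64-bit layout,
 * depending on which command variant the guest used.
 */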
5450 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5451                                     int fd, int cmd, abi_long arg)
5452 {
5453     struct timeval tv;
5454     abi_long ret;
5455 
5456     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5457     if (is_error(ret)) {
5458         return ret;
5459     }
5460 
5461     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5462         if (copy_to_user_timeval(arg, &tv)) {
5463             return -TARGET_EFAULT;
5464         }
5465     } else {
5466         if (copy_to_user_timeval64(arg, &tv)) {
5467             return -TARGET_EFAULT;
5468         }
5469     }
5470 
5471     return ret;
5472 }
5473 
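/* SIOCGSTAMPNS: as above, but using struct timespec. */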
5474 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5475                                       int fd, int cmd, abi_long arg)
5476 {
5477     struct timespec ts;
5478     abi_long ret;
5479 
5480     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5481     if (is_error(ret)) {
5482         return ret;
5483     }
5484 
5485     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5486         if (host_to_target_timespec(arg, &ts)) {
5487             return -TARGET_EFAULT;
5488         }
5489     } else {
5490         if (host_to_target_timespec64(arg, &ts)) {
5491             return -TARGET_EFAULT;
5492         }
5493     }
5494 
5495     return ret;
5496 }
5497 
5498 #ifdef TIOCGPTPEER
5499 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5500                                      int fd, int cmd, abi_long arg)
5501 {
5502     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5503     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5504 }
5505 #endif
5506 
5507 #ifdef HAVE_DRM_H
5508 
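/*
 * DRM_IOCTL_VERSION returns three guest-supplied string buffers; these
 * helpers lock/unlock them and convert struct drm_version both ways.
 */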
5509 static void unlock_drm_version(struct drm_version *host_ver,
5510                                struct target_drm_version *target_ver,
5511                                bool copy)
5512 {
5513     unlock_user(host_ver->name, target_ver->name,
5514                                 copy ? host_ver->name_len : 0);
5515     unlock_user(host_ver->date, target_ver->date,
5516                                 copy ? host_ver->date_len : 0);
5517     unlock_user(host_ver->desc, target_ver->desc,
5518                                 copy ? host_ver->desc_len : 0);
5519 }
5520 
5521 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5522                                           struct target_drm_version *target_ver)
5523 {
5524     memset(host_ver, 0, sizeof(*host_ver));
5525 
5526     __get_user(host_ver->name_len, &target_ver->name_len);
5527     if (host_ver->name_len) {
5528         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5529                                    target_ver->name_len, 0);
5530         if (!host_ver->name) {
5531             return -EFAULT;
5532         }
5533     }
5534 
5535     __get_user(host_ver->date_len, &target_ver->date_len);
5536     if (host_ver->date_len) {
5537         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5538                                    target_ver->date_len, 0);
5539         if (!host_ver->date) {
5540             goto err;
5541         }
5542     }
5543 
5544     __get_user(host_ver->desc_len, &target_ver->desc_len);
5545     if (host_ver->desc_len) {
5546         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5547                                    target_ver->desc_len, 0);
5548         if (!host_ver->desc) {
5549             goto err;
5550         }
5551     }
5552 
5553     return 0;
5554 err:
5555     unlock_drm_version(host_ver, target_ver, false);
5556     return -EFAULT;
5557 }
5558 
5559 static inline void host_to_target_drmversion(
5560                                           struct target_drm_version *target_ver,
5561                                           struct drm_version *host_ver)
5562 {
5563     __put_user(host_ver->version_major, &target_ver->version_major);
5564     __put_user(host_ver->version_minor, &target_ver->version_minor);
5565     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5566     __put_user(host_ver->name_len, &target_ver->name_len);
5567     __put_user(host_ver->date_len, &target_ver->date_len);
5568     __put_user(host_ver->desc_len, &target_ver->desc_len);
5569     unlock_drm_version(host_ver, target_ver, true);
5570 }
5571 
5572 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5573                              int fd, int cmd, abi_long arg)
5574 {
5575     struct drm_version *ver;
5576     struct target_drm_version *target_ver;
5577     abi_long ret;
5578 
5579     switch (ie->host_cmd) {
5580     case DRM_IOCTL_VERSION:
5581         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5582             return -TARGET_EFAULT;
5583         }
5584         ver = (struct drm_version *)buf_temp;
5585         ret = target_to_host_drmversion(ver, target_ver);
5586         if (!is_error(ret)) {
5587             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5588             if (is_error(ret)) {
5589                 unlock_drm_version(ver, target_ver, false);
5590             } else {
5591                 host_to_target_drmversion(target_ver, ver);
5592             }
5593         }
5594         unlock_user_struct(target_ver, arg, 0);
5595         return ret;
5596     }
5597     return -TARGET_ENOSYS;
5598 }
5599 
5600 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5601                                            struct drm_i915_getparam *gparam,
5602                                            int fd, abi_long arg)
5603 {
5604     abi_long ret;
5605     int value;
5606     struct target_drm_i915_getparam *target_gparam;
5607 
5608     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5609         return -TARGET_EFAULT;
5610     }
5611 
5612     __get_user(gparam->param, &target_gparam->param);
5613     gparam->value = &value;
5614     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5615     put_user_s32(value, target_gparam->value);
5616 
5617     unlock_user_struct(target_gparam, arg, 0);
5618     return ret;
5619 }
5620 
5621 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5622                                   int fd, int cmd, abi_long arg)
5623 {
5624     switch (ie->host_cmd) {
5625     case DRM_IOCTL_I915_GETPARAM:
5626         return do_ioctl_drm_i915_getparam(ie,
5627                                           (struct drm_i915_getparam *)buf_temp,
5628                                           fd, arg);
5629     default:
5630         return -TARGET_ENOSYS;
5631     }
5632 }
5633 
5634 #endif
5635 
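/*
 * TUNSETTXFILTER: struct tun_filter is followed by a variable-length
 * array of MAC addresses, so copy the header and the address array
 * separately and bound-check the total size against buf_temp.
 */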
5636 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5637                                         int fd, int cmd, abi_long arg)
5638 {
5639     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5640     struct tun_filter *target_filter;
5641     char *target_addr;
5642 
5643     assert(ie->access == IOC_W);
5644 
5645     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5646     if (!target_filter) {
5647         return -TARGET_EFAULT;
5648     }
5649     filter->flags = tswap16(target_filter->flags);
5650     filter->count = tswap16(target_filter->count);
5651     unlock_user(target_filter, arg, 0);
5652 
5653     if (filter->count) {
5654         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5655             MAX_STRUCT_SIZE) {
5656             return -TARGET_EFAULT;
5657         }
5658 
5659         target_addr = lock_user(VERIFY_READ,
5660                                 arg + offsetof(struct tun_filter, addr),
5661                                 filter->count * ETH_ALEN, 1);
5662         if (!target_addr) {
5663             return -TARGET_EFAULT;
5664         }
5665         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5666         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5667     }
5668 
5669     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5670 }
5671 
5672 IOCTLEntry ioctl_entries[] = {
5673 #define IOCTL(cmd, access, ...) \
5674     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5675 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5676     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5677 #define IOCTL_IGNORE(cmd) \
5678     { TARGET_ ## cmd, 0, #cmd },
5679 #include "ioctls.h"
5680     { 0, 0, },
5681 };
5682 
5683 /* ??? Implement proper locking for ioctls.  */
5684 /* do_ioctl() must return target values and target errnos. */
5685 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5686 {
5687     const IOCTLEntry *ie;
5688     const argtype *arg_type;
5689     abi_long ret;
5690     uint8_t buf_temp[MAX_STRUCT_SIZE];
5691     int target_size;
5692     void *argptr;
5693 
5694     ie = ioctl_entries;
5695     for (;;) {
5696         if (ie->target_cmd == 0) {
5697             qemu_log_mask(
5698                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5699             return -TARGET_ENOSYS;
5700         }
5701         if (ie->target_cmd == cmd)
5702             break;
5703         ie++;
5704     }
5705     arg_type = ie->arg_type;
5706     if (ie->do_ioctl) {
5707         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5708     } else if (!ie->host_cmd) {
5709         /* Some architectures define BSD ioctls in their headers
5710            that are not implemented in Linux.  */
5711         return -TARGET_ENOSYS;
5712     }
5713 
5714     switch (arg_type[0]) {
5715     case TYPE_NULL:
5716         /* no argument */
5717         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5718         break;
5719     case TYPE_PTRVOID:
5720     case TYPE_INT:
5721     case TYPE_LONG:
5722     case TYPE_ULONG:
5723         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5724         break;
5725     case TYPE_PTR:
5726         arg_type++;
5727         target_size = thunk_type_size(arg_type, 0);
5728         switch (ie->access) {
5729         case IOC_R:
5730             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5731             if (!is_error(ret)) {
5732                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5733                 if (!argptr)
5734                     return -TARGET_EFAULT;
5735                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5736                 unlock_user(argptr, arg, target_size);
5737             }
5738             break;
5739         case IOC_W:
5740             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5741             if (!argptr)
5742                 return -TARGET_EFAULT;
5743             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5744             unlock_user(argptr, arg, 0);
5745             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5746             break;
5747         default:
5748         case IOC_RW:
5749             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5750             if (!argptr)
5751                 return -TARGET_EFAULT;
5752             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5753             unlock_user(argptr, arg, 0);
5754             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5755             if (!is_error(ret)) {
5756                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5757                 if (!argptr)
5758                     return -TARGET_EFAULT;
5759                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5760                 unlock_user(argptr, arg, target_size);
5761             }
5762             break;
5763         }
5764         break;
5765     default:
5766         qemu_log_mask(LOG_UNIMP,
5767                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5768                       (long)cmd, arg_type[0]);
5769         ret = -TARGET_ENOSYS;
5770         break;
5771     }
5772     return ret;
5773 }
5774 
5775 static const bitmask_transtbl iflag_tbl[] = {
5776         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5777         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5778         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5779         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5780         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5781         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5782         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5783         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5784         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5785         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5786         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5787         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5788         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5789         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5790         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5791         { 0, 0, 0, 0 }
5792 };
5793 
5794 static const bitmask_transtbl oflag_tbl[] = {
5795 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5796 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5797 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5798 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5799 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5800 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5801 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5802 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5803 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5804 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5805 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5806 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5807 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5808 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5809 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5810 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5811 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5812 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5813 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5814 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5815 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5816 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5817 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5818 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5819 	{ 0, 0, 0, 0 }
5820 };
5821 
5822 static const bitmask_transtbl cflag_tbl[] = {
5823 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5824 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5825 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5826 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5827 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5828 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5829 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5830 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5831 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5832 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5833 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5834 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5835 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5836 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5837 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5838 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5839 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5840 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5841 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5842 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5843 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5844 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5845 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5846 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5847 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5848 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5849 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5850 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5851 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5852 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5853 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5854 	{ 0, 0, 0, 0 }
5855 };
5856 
5857 static const bitmask_transtbl lflag_tbl[] = {
5858   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5859   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5860   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5861   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5862   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5863   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5864   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5865   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5866   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5867   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5868   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5869   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5870   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5871   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5872   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5873   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5874   { 0, 0, 0, 0 }
5875 };
5876 
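/* Convert a target struct termios to the host layout, flag by flag. */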
5877 static void target_to_host_termios (void *dst, const void *src)
5878 {
5879     struct host_termios *host = dst;
5880     const struct target_termios *target = src;
5881 
5882     host->c_iflag =
5883         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5884     host->c_oflag =
5885         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5886     host->c_cflag =
5887         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5888     host->c_lflag =
5889         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5890     host->c_line = target->c_line;
5891 
5892     memset(host->c_cc, 0, sizeof(host->c_cc));
5893     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5894     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5895     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5896     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5897     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5898     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5899     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5900     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5901     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5902     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5903     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5904     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5905     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5906     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5907     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5908     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5909     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5910 }
5911 
5912 static void host_to_target_termios (void *dst, const void *src)
5913 {
5914     struct target_termios *target = dst;
5915     const struct host_termios *host = src;
5916 
5917     target->c_iflag =
5918         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5919     target->c_oflag =
5920         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5921     target->c_cflag =
5922         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5923     target->c_lflag =
5924         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5925     target->c_line = host->c_line;
5926 
5927     memset(target->c_cc, 0, sizeof(target->c_cc));
5928     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5929     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5930     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5931     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5932     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5933     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5934     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5935     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5936     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5937     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5938     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5939     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5940     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5941     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5942     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5943     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5944     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5945 }
5946 
5947 static const StructEntry struct_termios_def = {
5948     .convert = { host_to_target_termios, target_to_host_termios },
5949     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5950     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5951     .print = print_termios,
5952 };
5953 
5954 static const bitmask_transtbl mmap_flags_tbl[] = {
5955     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5956     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5957     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5958     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5959       MAP_ANONYMOUS, MAP_ANONYMOUS },
5960     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5961       MAP_GROWSDOWN, MAP_GROWSDOWN },
5962     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5963       MAP_DENYWRITE, MAP_DENYWRITE },
5964     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5965       MAP_EXECUTABLE, MAP_EXECUTABLE },
5966     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5967     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5968       MAP_NORESERVE, MAP_NORESERVE },
5969     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5970     /* MAP_STACK had been ignored by the kernel for quite some time.
5971        Recognize it for the target insofar as we do not want to pass
5972        it through to the host.  */
5973     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5974     { 0, 0, 0, 0 }
5975 };
5976 
5977 /*
5978  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64);
5979  *       TARGET_I386 is defined if TARGET_X86_64 is defined.
5980  */
5981 #if defined(TARGET_I386)
5982 
5983 /* NOTE: there is really one LDT for all the threads */
5984 static uint8_t *ldt_table;
5985 
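/* Copy the (process-wide) LDT contents out to the guest buffer. */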
5986 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5987 {
5988     int size;
5989     void *p;
5990 
5991     if (!ldt_table)
5992         return 0;
5993     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5994     if (size > bytecount)
5995         size = bytecount;
5996     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5997     if (!p)
5998         return -TARGET_EFAULT;
5999     /* ??? Should this be byteswapped?  */
6000     memcpy(p, ldt_table, size);
6001     unlock_user(p, ptr, size);
6002     return size;
6003 }
6004 
6005 /* XXX: add locking support */
6006 static abi_long write_ldt(CPUX86State *env,
6007                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6008 {
6009     struct target_modify_ldt_ldt_s ldt_info;
6010     struct target_modify_ldt_ldt_s *target_ldt_info;
6011     int seg_32bit, contents, read_exec_only, limit_in_pages;
6012     int seg_not_present, useable, lm;
6013     uint32_t *lp, entry_1, entry_2;
6014 
6015     if (bytecount != sizeof(ldt_info))
6016         return -TARGET_EINVAL;
6017     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6018         return -TARGET_EFAULT;
6019     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6020     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6021     ldt_info.limit = tswap32(target_ldt_info->limit);
6022     ldt_info.flags = tswap32(target_ldt_info->flags);
6023     unlock_user_struct(target_ldt_info, ptr, 0);
6024 
6025     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6026         return -TARGET_EINVAL;
6027     seg_32bit = ldt_info.flags & 1;
6028     contents = (ldt_info.flags >> 1) & 3;
6029     read_exec_only = (ldt_info.flags >> 3) & 1;
6030     limit_in_pages = (ldt_info.flags >> 4) & 1;
6031     seg_not_present = (ldt_info.flags >> 5) & 1;
6032     useable = (ldt_info.flags >> 6) & 1;
6033 #ifdef TARGET_ABI32
6034     lm = 0;
6035 #else
6036     lm = (ldt_info.flags >> 7) & 1;
6037 #endif
6038     if (contents == 3) {
6039         if (oldmode)
6040             return -TARGET_EINVAL;
6041         if (seg_not_present == 0)
6042             return -TARGET_EINVAL;
6043     }
6044     /* allocate the LDT */
6045     if (!ldt_table) {
6046         env->ldt.base = target_mmap(0,
6047                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6048                                     PROT_READ|PROT_WRITE,
6049                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6050         if (env->ldt.base == -1)
6051             return -TARGET_ENOMEM;
6052         memset(g2h_untagged(env->ldt.base), 0,
6053                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6054         env->ldt.limit = 0xffff;
6055         ldt_table = g2h_untagged(env->ldt.base);
6056     }
6057 
6058     /* NOTE: same code as Linux kernel */
6059     /* Allow LDTs to be cleared by the user. */
6060     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6061         if (oldmode ||
6062             (contents == 0		&&
6063              read_exec_only == 1	&&
6064              seg_32bit == 0		&&
6065              limit_in_pages == 0	&&
6066              seg_not_present == 1	&&
6067              useable == 0 )) {
6068             entry_1 = 0;
6069             entry_2 = 0;
6070             goto install;
6071         }
6072     }
6073 
6074     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6075         (ldt_info.limit & 0x0ffff);
6076     entry_2 = (ldt_info.base_addr & 0xff000000) |
6077         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6078         (ldt_info.limit & 0xf0000) |
6079         ((read_exec_only ^ 1) << 9) |
6080         (contents << 10) |
6081         ((seg_not_present ^ 1) << 15) |
6082         (seg_32bit << 22) |
6083         (limit_in_pages << 23) |
6084         (lm << 21) |
6085         0x7000;
6086     if (!oldmode)
6087         entry_2 |= (useable << 20);
6088 
6089     /* Install the new entry ...  */
6090 install:
6091     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6092     lp[0] = tswap32(entry_1);
6093     lp[1] = tswap32(entry_2);
6094     return 0;
6095 }
6096 
6097 /* specific and weird i386 syscalls */
6098 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6099                               unsigned long bytecount)
6100 {
6101     abi_long ret;
6102 
6103     switch (func) {
6104     case 0:
6105         ret = read_ldt(ptr, bytecount);
6106         break;
6107     case 1:
6108         ret = write_ldt(env, ptr, bytecount, 1);
6109         break;
6110     case 0x11:
6111         ret = write_ldt(env, ptr, bytecount, 0);
6112         break;
6113     default:
6114         ret = -TARGET_ENOSYS;
6115         break;
6116     }
6117     return ret;
6118 }
6119 
6120 #if defined(TARGET_ABI32)
6121 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6122 {
6123     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6124     struct target_modify_ldt_ldt_s ldt_info;
6125     struct target_modify_ldt_ldt_s *target_ldt_info;
6126     int seg_32bit, contents, read_exec_only, limit_in_pages;
6127     int seg_not_present, useable, lm;
6128     uint32_t *lp, entry_1, entry_2;
6129     int i;
6130 
6131     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6132     if (!target_ldt_info)
6133         return -TARGET_EFAULT;
6134     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6135     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6136     ldt_info.limit = tswap32(target_ldt_info->limit);
6137     ldt_info.flags = tswap32(target_ldt_info->flags);
6138     if (ldt_info.entry_number == -1) {
6139         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6140             if (gdt_table[i] == 0) {
6141                 ldt_info.entry_number = i;
6142                 target_ldt_info->entry_number = tswap32(i);
6143                 break;
6144             }
6145         }
6146     }
6147     unlock_user_struct(target_ldt_info, ptr, 1);
6148 
6149     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6150         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6151         return -TARGET_EINVAL;
6152     seg_32bit = ldt_info.flags & 1;
6153     contents = (ldt_info.flags >> 1) & 3;
6154     read_exec_only = (ldt_info.flags >> 3) & 1;
6155     limit_in_pages = (ldt_info.flags >> 4) & 1;
6156     seg_not_present = (ldt_info.flags >> 5) & 1;
6157     useable = (ldt_info.flags >> 6) & 1;
6158 #ifdef TARGET_ABI32
6159     lm = 0;
6160 #else
6161     lm = (ldt_info.flags >> 7) & 1;
6162 #endif
6163 
6164     if (contents == 3) {
6165         if (seg_not_present == 0)
6166             return -TARGET_EINVAL;
6167     }
6168 
6169     /* NOTE: same code as Linux kernel */
6170     /* Allow LDTs to be cleared by the user. */
6171     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6172         if ((contents == 0             &&
6173              read_exec_only == 1       &&
6174              seg_32bit == 0            &&
6175              limit_in_pages == 0       &&
6176              seg_not_present == 1      &&
6177              useable == 0 )) {
6178             entry_1 = 0;
6179             entry_2 = 0;
6180             goto install;
6181         }
6182     }
6183 
6184     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6185         (ldt_info.limit & 0x0ffff);
6186     entry_2 = (ldt_info.base_addr & 0xff000000) |
6187         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6188         (ldt_info.limit & 0xf0000) |
6189         ((read_exec_only ^ 1) << 9) |
6190         (contents << 10) |
6191         ((seg_not_present ^ 1) << 15) |
6192         (seg_32bit << 22) |
6193         (limit_in_pages << 23) |
6194         (useable << 20) |
6195         (lm << 21) |
6196         0x7000;
6197 
6198     /* Install the new entry ...  */
6199 install:
6200     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6201     lp[0] = tswap32(entry_1);
6202     lp[1] = tswap32(entry_2);
6203     return 0;
6204 }
6205 
6206 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6207 {
6208     struct target_modify_ldt_ldt_s *target_ldt_info;
6209     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6210     uint32_t base_addr, limit, flags;
6211     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6212     int seg_not_present, useable, lm;
6213     uint32_t *lp, entry_1, entry_2;
6214 
6215     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6216     if (!target_ldt_info)
6217         return -TARGET_EFAULT;
6218     idx = tswap32(target_ldt_info->entry_number);
6219     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6220         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6221         unlock_user_struct(target_ldt_info, ptr, 1);
6222         return -TARGET_EINVAL;
6223     }
6224     lp = (uint32_t *)(gdt_table + idx);
6225     entry_1 = tswap32(lp[0]);
6226     entry_2 = tswap32(lp[1]);
6227 
6228     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6229     contents = (entry_2 >> 10) & 3;
6230     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6231     seg_32bit = (entry_2 >> 22) & 1;
6232     limit_in_pages = (entry_2 >> 23) & 1;
6233     useable = (entry_2 >> 20) & 1;
6234 #ifdef TARGET_ABI32
6235     lm = 0;
6236 #else
6237     lm = (entry_2 >> 21) & 1;
6238 #endif
6239     flags = (seg_32bit << 0) | (contents << 1) |
6240         (read_exec_only << 3) | (limit_in_pages << 4) |
6241         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6242     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6243     base_addr = (entry_1 >> 16) |
6244         (entry_2 & 0xff000000) |
6245         ((entry_2 & 0xff) << 16);
6246     target_ldt_info->base_addr = tswapal(base_addr);
6247     target_ldt_info->limit = tswap32(limit);
6248     target_ldt_info->flags = tswap32(flags);
6249     unlock_user_struct(target_ldt_info, ptr, 1);
6250     return 0;
6251 }
6252 
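/* For the 32-bit ABI arch_prctl is not supported; always fail with ENOSYS. */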
6253 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6254 {
6255     return -TARGET_ENOSYS;
6256 }
6257 #else
6258 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6259 {
6260     abi_long ret = 0;
6261     abi_ulong val;
6262     int idx;
6263 
6264     switch(code) {
6265     case TARGET_ARCH_SET_GS:
6266     case TARGET_ARCH_SET_FS:
6267         if (code == TARGET_ARCH_SET_GS)
6268             idx = R_GS;
6269         else
6270             idx = R_FS;
6271         cpu_x86_load_seg(env, idx, 0);
6272         env->segs[idx].base = addr;
6273         break;
6274     case TARGET_ARCH_GET_GS:
6275     case TARGET_ARCH_GET_FS:
6276         if (code == TARGET_ARCH_GET_GS)
6277             idx = R_GS;
6278         else
6279             idx = R_FS;
6280         val = env->segs[idx].base;
6281         if (put_user(val, addr, abi_ulong))
6282             ret = -TARGET_EFAULT;
6283         break;
6284     default:
6285         ret = -TARGET_EINVAL;
6286         break;
6287     }
6288     return ret;
6289 }
6290 #endif /* defined(TARGET_ABI32) */
6291 
6292 #endif /* defined(TARGET_I386) */
6293 
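/* Host stack size (256 KiB) given to each thread created for a guest clone(). */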
6294 #define NEW_STACK_SIZE 0x40000
6295 
6296 
6297 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
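/*
 * Hand-off block between do_fork() and clone_func(): the parent fills in the
 * new CPU state and the tid pointers, the child reports its tid back and
 * broadcasts the condition variable once it is ready to run.
 */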
6298 typedef struct {
6299     CPUArchState *env;
6300     pthread_mutex_t mutex;
6301     pthread_cond_t cond;
6302     pthread_t thread;
6303     uint32_t tid;
6304     abi_ulong child_tidptr;
6305     abi_ulong parent_tidptr;
6306     sigset_t sigmask;
6307 } new_thread_info;
6308 
6309 static void *clone_func(void *arg)
6310 {
6311     new_thread_info *info = arg;
6312     CPUArchState *env;
6313     CPUState *cpu;
6314     TaskState *ts;
6315 
6316     rcu_register_thread();
6317     tcg_register_thread();
6318     env = info->env;
6319     cpu = env_cpu(env);
6320     thread_cpu = cpu;
6321     ts = (TaskState *)cpu->opaque;
6322     info->tid = sys_gettid();
6323     task_settid(ts);
6324     if (info->child_tidptr)
6325         put_user_u32(info->tid, info->child_tidptr);
6326     if (info->parent_tidptr)
6327         put_user_u32(info->tid, info->parent_tidptr);
6328     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6329     /* Enable signals.  */
6330     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6331     /* Signal to the parent that we're ready.  */
6332     pthread_mutex_lock(&info->mutex);
6333     pthread_cond_broadcast(&info->cond);
6334     pthread_mutex_unlock(&info->mutex);
6335     /* Wait until the parent has finished initializing the tls state.  */
6336     pthread_mutex_lock(&clone_lock);
6337     pthread_mutex_unlock(&clone_lock);
6338     cpu_loop(env);
6339     /* never exits */
6340     return NULL;
6341 }
6342 
6343 /* do_fork() must return host values and target errnos (unlike most
6344    do_*() functions). */
6345 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6346                    abi_ulong parent_tidptr, target_ulong newtls,
6347                    abi_ulong child_tidptr)
6348 {
6349     CPUState *cpu = env_cpu(env);
6350     int ret;
6351     TaskState *ts;
6352     CPUState *new_cpu;
6353     CPUArchState *new_env;
6354     sigset_t sigmask;
6355 
6356     flags &= ~CLONE_IGNORED_FLAGS;
6357 
6358     /* Emulate vfork() with fork() */
6359     if (flags & CLONE_VFORK)
6360         flags &= ~(CLONE_VFORK | CLONE_VM);
6361 
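    /*
     * CLONE_VM means the child shares our address space, so it becomes a
     * host thread running clone_func(); without CLONE_VM we fall back to a
     * real host fork() below.
     */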
6362     if (flags & CLONE_VM) {
6363         TaskState *parent_ts = (TaskState *)cpu->opaque;
6364         new_thread_info info;
6365         pthread_attr_t attr;
6366 
6367         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6368             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6369             return -TARGET_EINVAL;
6370         }
6371 
6372         ts = g_new0(TaskState, 1);
6373         init_task_state(ts);
6374 
6375         /* Grab a mutex so that thread setup appears atomic.  */
6376         pthread_mutex_lock(&clone_lock);
6377 
6378         /*
6379          * If this is our first additional thread, we need to ensure we
6380          * generate code for parallel execution and flush old translations.
6381          * Do this now so that the copy gets CF_PARALLEL too.
6382          */
6383         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6384             cpu->tcg_cflags |= CF_PARALLEL;
6385             tb_flush(cpu);
6386         }
6387 
6388         /* we create a new CPU instance. */
6389         new_env = cpu_copy(env);
6390         /* Init regs that differ from the parent.  */
6391         cpu_clone_regs_child(new_env, newsp, flags);
6392         cpu_clone_regs_parent(env, flags);
6393         new_cpu = env_cpu(new_env);
6394         new_cpu->opaque = ts;
6395         ts->bprm = parent_ts->bprm;
6396         ts->info = parent_ts->info;
6397         ts->signal_mask = parent_ts->signal_mask;
6398 
6399         if (flags & CLONE_CHILD_CLEARTID) {
6400             ts->child_tidptr = child_tidptr;
6401         }
6402 
6403         if (flags & CLONE_SETTLS) {
6404             cpu_set_tls (new_env, newtls);
6405         }
6406 
6407         memset(&info, 0, sizeof(info));
6408         pthread_mutex_init(&info.mutex, NULL);
6409         pthread_mutex_lock(&info.mutex);
6410         pthread_cond_init(&info.cond, NULL);
6411         info.env = new_env;
6412         if (flags & CLONE_CHILD_SETTID) {
6413             info.child_tidptr = child_tidptr;
6414         }
6415         if (flags & CLONE_PARENT_SETTID) {
6416             info.parent_tidptr = parent_tidptr;
6417         }
6418 
6419         ret = pthread_attr_init(&attr);
6420         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6421         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6422         /* It is not safe to deliver signals until the child has finished
6423            initializing, so temporarily block all signals.  */
6424         sigfillset(&sigmask);
6425         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6426         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6427 
6428         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6429         /* TODO: Free new CPU state if thread creation failed.  */
6430 
6431         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6432         pthread_attr_destroy(&attr);
6433         if (ret == 0) {
6434             /* Wait for the child to initialize.  */
6435             pthread_cond_wait(&info.cond, &info.mutex);
6436             ret = info.tid;
6437         } else {
6438             ret = -1;
6439         }
6440         pthread_mutex_unlock(&info.mutex);
6441         pthread_cond_destroy(&info.cond);
6442         pthread_mutex_destroy(&info.mutex);
6443         pthread_mutex_unlock(&clone_lock);
6444     } else {
6445         /* Without CLONE_VM we treat the request as a plain fork. */
6446         if (flags & CLONE_INVALID_FORK_FLAGS) {
6447             return -TARGET_EINVAL;
6448         }
6449 
6450         /* We can't support custom termination signals */
6451         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6452             return -TARGET_EINVAL;
6453         }
6454 
6455         if (block_signals()) {
6456             return -TARGET_ERESTARTSYS;
6457         }
6458 
6459         fork_start();
6460         ret = fork();
6461         if (ret == 0) {
6462             /* Child Process.  */
6463             cpu_clone_regs_child(env, newsp, flags);
6464             fork_end(1);
6465             /* There is a race condition here.  The parent process could
6466                theoretically read the TID in the child process before the child
6467                tid is set.  This would require using either ptrace
6468                (not implemented) or having *_tidptr point at a shared memory
6469                mapping.  We can't repeat the spinlock hack used above because
6470                the child process gets its own copy of the lock.  */
6471             if (flags & CLONE_CHILD_SETTID)
6472                 put_user_u32(sys_gettid(), child_tidptr);
6473             if (flags & CLONE_PARENT_SETTID)
6474                 put_user_u32(sys_gettid(), parent_tidptr);
6475             ts = (TaskState *)cpu->opaque;
6476             if (flags & CLONE_SETTLS)
6477                 cpu_set_tls (env, newtls);
6478             if (flags & CLONE_CHILD_CLEARTID)
6479                 ts->child_tidptr = child_tidptr;
6480         } else {
6481             cpu_clone_regs_parent(env, flags);
6482             fork_end(0);
6483         }
6484     }
6485     return ret;
6486 }
6487 
6488 /* Warning: does not handle Linux-specific flags... */
6489 static int target_to_host_fcntl_cmd(int cmd)
6490 {
6491     int ret;
6492 
6493     switch(cmd) {
6494     case TARGET_F_DUPFD:
6495     case TARGET_F_GETFD:
6496     case TARGET_F_SETFD:
6497     case TARGET_F_GETFL:
6498     case TARGET_F_SETFL:
6499     case TARGET_F_OFD_GETLK:
6500     case TARGET_F_OFD_SETLK:
6501     case TARGET_F_OFD_SETLKW:
6502         ret = cmd;
6503         break;
6504     case TARGET_F_GETLK:
6505         ret = F_GETLK64;
6506         break;
6507     case TARGET_F_SETLK:
6508         ret = F_SETLK64;
6509         break;
6510     case TARGET_F_SETLKW:
6511         ret = F_SETLKW64;
6512         break;
6513     case TARGET_F_GETOWN:
6514         ret = F_GETOWN;
6515         break;
6516     case TARGET_F_SETOWN:
6517         ret = F_SETOWN;
6518         break;
6519     case TARGET_F_GETSIG:
6520         ret = F_GETSIG;
6521         break;
6522     case TARGET_F_SETSIG:
6523         ret = F_SETSIG;
6524         break;
6525 #if TARGET_ABI_BITS == 32
6526     case TARGET_F_GETLK64:
6527         ret = F_GETLK64;
6528         break;
6529     case TARGET_F_SETLK64:
6530         ret = F_SETLK64;
6531         break;
6532     case TARGET_F_SETLKW64:
6533         ret = F_SETLKW64;
6534         break;
6535 #endif
6536     case TARGET_F_SETLEASE:
6537         ret = F_SETLEASE;
6538         break;
6539     case TARGET_F_GETLEASE:
6540         ret = F_GETLEASE;
6541         break;
6542 #ifdef F_DUPFD_CLOEXEC
6543     case TARGET_F_DUPFD_CLOEXEC:
6544         ret = F_DUPFD_CLOEXEC;
6545         break;
6546 #endif
6547     case TARGET_F_NOTIFY:
6548         ret = F_NOTIFY;
6549         break;
6550 #ifdef F_GETOWN_EX
6551     case TARGET_F_GETOWN_EX:
6552         ret = F_GETOWN_EX;
6553         break;
6554 #endif
6555 #ifdef F_SETOWN_EX
6556     case TARGET_F_SETOWN_EX:
6557         ret = F_SETOWN_EX;
6558         break;
6559 #endif
6560 #ifdef F_SETPIPE_SZ
6561     case TARGET_F_SETPIPE_SZ:
6562         ret = F_SETPIPE_SZ;
6563         break;
6564     case TARGET_F_GETPIPE_SZ:
6565         ret = F_GETPIPE_SZ;
6566         break;
6567 #endif
6568 #ifdef F_ADD_SEALS
6569     case TARGET_F_ADD_SEALS:
6570         ret = F_ADD_SEALS;
6571         break;
6572     case TARGET_F_GET_SEALS:
6573         ret = F_GET_SEALS;
6574         break;
6575 #endif
6576     default:
6577         ret = -TARGET_EINVAL;
6578         break;
6579     }
6580 
6581 #if defined(__powerpc64__)
6582     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and 14,
6583      * which are not supported by the kernel. The glibc fcntl call actually
6584      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6585      * the syscall directly, adjust to what the kernel supports.
6586      */
6587     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6588         ret -= F_GETLK64 - 5;
6589     }
6590 #endif
6591 
6592     return ret;
6593 }
6594 
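/*
 * X-macro: TRANSTBL_CONVERT is defined one way to translate target flock
 * types to host values and the other way to translate host values back.
 */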
6595 #define FLOCK_TRANSTBL \
6596     switch (type) { \
6597     TRANSTBL_CONVERT(F_RDLCK); \
6598     TRANSTBL_CONVERT(F_WRLCK); \
6599     TRANSTBL_CONVERT(F_UNLCK); \
6600     }
6601 
6602 static int target_to_host_flock(int type)
6603 {
6604 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6605     FLOCK_TRANSTBL
6606 #undef  TRANSTBL_CONVERT
6607     return -TARGET_EINVAL;
6608 }
6609 
6610 static int host_to_target_flock(int type)
6611 {
6612 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6613     FLOCK_TRANSTBL
6614 #undef  TRANSTBL_CONVERT
6615     /* If we don't know how to convert the value coming from the host,
6616      * copy it to the target field as-is.
6617      */
6618     return type;
6619 }
6620 
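/*
 * The target's 32-bit F_GETLK/F_SETLK/F_SETLKW commands are remapped to the
 * host's 64-bit F_*LK64 equivalents above, so all of these helpers marshal
 * into a host struct flock64 regardless of which target layout they copy.
 */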
6621 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6622                                             abi_ulong target_flock_addr)
6623 {
6624     struct target_flock *target_fl;
6625     int l_type;
6626 
6627     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6628         return -TARGET_EFAULT;
6629     }
6630 
6631     __get_user(l_type, &target_fl->l_type);
6632     l_type = target_to_host_flock(l_type);
6633     if (l_type < 0) {
6634         return l_type;
6635     }
6636     fl->l_type = l_type;
6637     __get_user(fl->l_whence, &target_fl->l_whence);
6638     __get_user(fl->l_start, &target_fl->l_start);
6639     __get_user(fl->l_len, &target_fl->l_len);
6640     __get_user(fl->l_pid, &target_fl->l_pid);
6641     unlock_user_struct(target_fl, target_flock_addr, 0);
6642     return 0;
6643 }
6644 
6645 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6646                                           const struct flock64 *fl)
6647 {
6648     struct target_flock *target_fl;
6649     short l_type;
6650 
6651     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6652         return -TARGET_EFAULT;
6653     }
6654 
6655     l_type = host_to_target_flock(fl->l_type);
6656     __put_user(l_type, &target_fl->l_type);
6657     __put_user(fl->l_whence, &target_fl->l_whence);
6658     __put_user(fl->l_start, &target_fl->l_start);
6659     __put_user(fl->l_len, &target_fl->l_len);
6660     __put_user(fl->l_pid, &target_fl->l_pid);
6661     unlock_user_struct(target_fl, target_flock_addr, 1);
6662     return 0;
6663 }
6664 
6665 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6666 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6667 
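/*
 * The ARM OABI does not 8-byte align 64-bit members, so its struct flock64
 * layout differs from the EABI one and needs separate copy helpers.
 */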
6668 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6669 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6670                                                    abi_ulong target_flock_addr)
6671 {
6672     struct target_oabi_flock64 *target_fl;
6673     int l_type;
6674 
6675     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6676         return -TARGET_EFAULT;
6677     }
6678 
6679     __get_user(l_type, &target_fl->l_type);
6680     l_type = target_to_host_flock(l_type);
6681     if (l_type < 0) {
6682         return l_type;
6683     }
6684     fl->l_type = l_type;
6685     __get_user(fl->l_whence, &target_fl->l_whence);
6686     __get_user(fl->l_start, &target_fl->l_start);
6687     __get_user(fl->l_len, &target_fl->l_len);
6688     __get_user(fl->l_pid, &target_fl->l_pid);
6689     unlock_user_struct(target_fl, target_flock_addr, 0);
6690     return 0;
6691 }
6692 
6693 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6694                                                  const struct flock64 *fl)
6695 {
6696     struct target_oabi_flock64 *target_fl;
6697     short l_type;
6698 
6699     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6700         return -TARGET_EFAULT;
6701     }
6702 
6703     l_type = host_to_target_flock(fl->l_type);
6704     __put_user(l_type, &target_fl->l_type);
6705     __put_user(fl->l_whence, &target_fl->l_whence);
6706     __put_user(fl->l_start, &target_fl->l_start);
6707     __put_user(fl->l_len, &target_fl->l_len);
6708     __put_user(fl->l_pid, &target_fl->l_pid);
6709     unlock_user_struct(target_fl, target_flock_addr, 1);
6710     return 0;
6711 }
6712 #endif
6713 
6714 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6715                                               abi_ulong target_flock_addr)
6716 {
6717     struct target_flock64 *target_fl;
6718     int l_type;
6719 
6720     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6721         return -TARGET_EFAULT;
6722     }
6723 
6724     __get_user(l_type, &target_fl->l_type);
6725     l_type = target_to_host_flock(l_type);
6726     if (l_type < 0) {
6727         return l_type;
6728     }
6729     fl->l_type = l_type;
6730     __get_user(fl->l_whence, &target_fl->l_whence);
6731     __get_user(fl->l_start, &target_fl->l_start);
6732     __get_user(fl->l_len, &target_fl->l_len);
6733     __get_user(fl->l_pid, &target_fl->l_pid);
6734     unlock_user_struct(target_fl, target_flock_addr, 0);
6735     return 0;
6736 }
6737 
6738 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6739                                             const struct flock64 *fl)
6740 {
6741     struct target_flock64 *target_fl;
6742     short l_type;
6743 
6744     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6745         return -TARGET_EFAULT;
6746     }
6747 
6748     l_type = host_to_target_flock(fl->l_type);
6749     __put_user(l_type, &target_fl->l_type);
6750     __put_user(fl->l_whence, &target_fl->l_whence);
6751     __put_user(fl->l_start, &target_fl->l_start);
6752     __put_user(fl->l_len, &target_fl->l_len);
6753     __put_user(fl->l_pid, &target_fl->l_pid);
6754     unlock_user_struct(target_fl, target_flock_addr, 1);
6755     return 0;
6756 }
6757 
6758 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6759 {
6760     struct flock64 fl64;
6761 #ifdef F_GETOWN_EX
6762     struct f_owner_ex fox;
6763     struct target_f_owner_ex *target_fox;
6764 #endif
6765     abi_long ret;
6766     int host_cmd = target_to_host_fcntl_cmd(cmd);
6767 
6768     if (host_cmd == -TARGET_EINVAL)
6769         return host_cmd;
6770 
6771     switch(cmd) {
6772     case TARGET_F_GETLK:
6773         ret = copy_from_user_flock(&fl64, arg);
6774         if (ret) {
6775             return ret;
6776         }
6777         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6778         if (ret == 0) {
6779             ret = copy_to_user_flock(arg, &fl64);
6780         }
6781         break;
6782 
6783     case TARGET_F_SETLK:
6784     case TARGET_F_SETLKW:
6785         ret = copy_from_user_flock(&fl64, arg);
6786         if (ret) {
6787             return ret;
6788         }
6789         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6790         break;
6791 
6792     case TARGET_F_GETLK64:
6793     case TARGET_F_OFD_GETLK:
6794         ret = copy_from_user_flock64(&fl64, arg);
6795         if (ret) {
6796             return ret;
6797         }
6798         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6799         if (ret == 0) {
6800             ret = copy_to_user_flock64(arg, &fl64);
6801         }
6802         break;
6803     case TARGET_F_SETLK64:
6804     case TARGET_F_SETLKW64:
6805     case TARGET_F_OFD_SETLK:
6806     case TARGET_F_OFD_SETLKW:
6807         ret = copy_from_user_flock64(&fl64, arg);
6808         if (ret) {
6809             return ret;
6810         }
6811         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6812         break;
6813 
6814     case TARGET_F_GETFL:
6815         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6816         if (ret >= 0) {
6817             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6818         }
6819         break;
6820 
6821     case TARGET_F_SETFL:
6822         ret = get_errno(safe_fcntl(fd, host_cmd,
6823                                    target_to_host_bitmask(arg,
6824                                                           fcntl_flags_tbl)));
6825         break;
6826 
6827 #ifdef F_GETOWN_EX
6828     case TARGET_F_GETOWN_EX:
6829         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6830         if (ret >= 0) {
6831             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6832                 return -TARGET_EFAULT;
6833             target_fox->type = tswap32(fox.type);
6834             target_fox->pid = tswap32(fox.pid);
6835             unlock_user_struct(target_fox, arg, 1);
6836         }
6837         break;
6838 #endif
6839 
6840 #ifdef F_SETOWN_EX
6841     case TARGET_F_SETOWN_EX:
6842         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6843             return -TARGET_EFAULT;
6844         fox.type = tswap32(target_fox->type);
6845         fox.pid = tswap32(target_fox->pid);
6846         unlock_user_struct(target_fox, arg, 0);
6847         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6848         break;
6849 #endif
6850 
6851     case TARGET_F_SETSIG:
6852         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6853         break;
6854 
6855     case TARGET_F_GETSIG:
6856         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6857         break;
6858 
6859     case TARGET_F_SETOWN:
6860     case TARGET_F_GETOWN:
6861     case TARGET_F_SETLEASE:
6862     case TARGET_F_GETLEASE:
6863     case TARGET_F_SETPIPE_SZ:
6864     case TARGET_F_GETPIPE_SZ:
6865     case TARGET_F_ADD_SEALS:
6866     case TARGET_F_GET_SEALS:
6867         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6868         break;
6869 
6870     default:
6871         ret = get_errno(safe_fcntl(fd, cmd, arg));
6872         break;
6873     }
6874     return ret;
6875 }
6876 
6877 #ifdef USE_UID16
6878 
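/*
 * Legacy 16-bit UID/GID targets: clamp ids that do not fit in 16 bits to the
 * overflow id 65534, and keep -1 ("leave unchanged") intact when widening.
 */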
6879 static inline int high2lowuid(int uid)
6880 {
6881     if (uid > 65535)
6882         return 65534;
6883     else
6884         return uid;
6885 }
6886 
6887 static inline int high2lowgid(int gid)
6888 {
6889     if (gid > 65535)
6890         return 65534;
6891     else
6892         return gid;
6893 }
6894 
6895 static inline int low2highuid(int uid)
6896 {
6897     if ((int16_t)uid == -1)
6898         return -1;
6899     else
6900         return uid;
6901 }
6902 
6903 static inline int low2highgid(int gid)
6904 {
6905     if ((int16_t)gid == -1)
6906         return -1;
6907     else
6908         return gid;
6909 }
6910 static inline int tswapid(int id)
6911 {
6912     return tswap16(id);
6913 }
6914 
6915 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6916 
6917 #else /* !USE_UID16 */
6918 static inline int high2lowuid(int uid)
6919 {
6920     return uid;
6921 }
6922 static inline int high2lowgid(int gid)
6923 {
6924     return gid;
6925 }
6926 static inline int low2highuid(int uid)
6927 {
6928     return uid;
6929 }
6930 static inline int low2highgid(int gid)
6931 {
6932     return gid;
6933 }
6934 static inline int tswapid(int id)
6935 {
6936     return tswap32(id);
6937 }
6938 
6939 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6940 
6941 #endif /* USE_UID16 */
6942 
6943 /* We must do direct syscalls for setting UID/GID, because we want to
6944  * implement the Linux system call semantics of "change only for this thread",
6945  * not the libc/POSIX semantics of "change for all threads in process".
6946  * (See http://ewontfix.com/17/ for more details.)
6947  * We use the 32-bit version of the syscalls if present; if it is not
6948  * then either the host architecture supports 32-bit UIDs natively with
6949  * the standard syscall, or the 16-bit UID is the best we can do.
6950  */
6951 #ifdef __NR_setuid32
6952 #define __NR_sys_setuid __NR_setuid32
6953 #else
6954 #define __NR_sys_setuid __NR_setuid
6955 #endif
6956 #ifdef __NR_setgid32
6957 #define __NR_sys_setgid __NR_setgid32
6958 #else
6959 #define __NR_sys_setgid __NR_setgid
6960 #endif
6961 #ifdef __NR_setresuid32
6962 #define __NR_sys_setresuid __NR_setresuid32
6963 #else
6964 #define __NR_sys_setresuid __NR_setresuid
6965 #endif
6966 #ifdef __NR_setresgid32
6967 #define __NR_sys_setresgid __NR_setresgid32
6968 #else
6969 #define __NR_sys_setresgid __NR_setresgid
6970 #endif
6971 
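/* Thin wrappers around the raw syscalls selected above, affecting only the
 * calling thread (unlike the glibc wrappers, which apply to every thread). */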
6972 _syscall1(int, sys_setuid, uid_t, uid)
6973 _syscall1(int, sys_setgid, gid_t, gid)
6974 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6975 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6976 
6977 void syscall_init(void)
6978 {
6979     IOCTLEntry *ie;
6980     const argtype *arg_type;
6981     int size;
6982 
6983     thunk_init(STRUCT_MAX);
6984 
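    /*
     * Including syscall_types.h with STRUCT/STRUCT_SPECIAL defined registers
     * every ioctl argument structure with the thunk layer, which then knows
     * how to convert each of them between target and host layouts.
     */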
6985 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6986 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6987 #include "syscall_types.h"
6988 #undef STRUCT
6989 #undef STRUCT_SPECIAL
6990 
6991     /* We patch the ioctl size if necessary. We rely on the fact that
6992        no ioctl has all the bits of its size field set to '1'. */
6993     ie = ioctl_entries;
6994     while (ie->target_cmd != 0) {
6995         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6996             TARGET_IOC_SIZEMASK) {
6997             arg_type = ie->arg_type;
6998             if (arg_type[0] != TYPE_PTR) {
6999                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7000                         ie->target_cmd);
7001                 exit(1);
7002             }
7003             arg_type++;
7004             size = thunk_type_size(arg_type, 0);
7005             ie->target_cmd = (ie->target_cmd &
7006                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7007                 (size << TARGET_IOC_SIZESHIFT);
7008         }
7009 
7010         /* automatic consistency check if same arch */
7011 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7012     (defined(__x86_64__) && defined(TARGET_X86_64))
7013         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7014             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7015                     ie->name, ie->target_cmd, ie->host_cmd);
7016         }
7017 #endif
7018         ie++;
7019     }
7020 }
7021 
7022 #ifdef TARGET_NR_truncate64
7023 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7024                                          abi_long arg2,
7025                                          abi_long arg3,
7026                                          abi_long arg4)
7027 {
7028     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7029         arg2 = arg3;
7030         arg3 = arg4;
7031     }
7032     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7033 }
7034 #endif
7035 
7036 #ifdef TARGET_NR_ftruncate64
7037 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7038                                           abi_long arg2,
7039                                           abi_long arg3,
7040                                           abi_long arg4)
7041 {
7042     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7043         arg2 = arg3;
7044         arg3 = arg4;
7045     }
7046     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7047 }
7048 #endif
7049 
7050 #if defined(TARGET_NR_timer_settime) || \
7051     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7052 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7053                                                  abi_ulong target_addr)
7054 {
7055     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7056                                 offsetof(struct target_itimerspec,
7057                                          it_interval)) ||
7058         target_to_host_timespec(&host_its->it_value, target_addr +
7059                                 offsetof(struct target_itimerspec,
7060                                          it_value))) {
7061         return -TARGET_EFAULT;
7062     }
7063 
7064     return 0;
7065 }
7066 #endif
7067 
7068 #if defined(TARGET_NR_timer_settime64) || \
7069     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7070 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7071                                                    abi_ulong target_addr)
7072 {
7073     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7074                                   offsetof(struct target__kernel_itimerspec,
7075                                            it_interval)) ||
7076         target_to_host_timespec64(&host_its->it_value, target_addr +
7077                                   offsetof(struct target__kernel_itimerspec,
7078                                            it_value))) {
7079         return -TARGET_EFAULT;
7080     }
7081 
7082     return 0;
7083 }
7084 #endif
7085 
7086 #if ((defined(TARGET_NR_timerfd_gettime) || \
7087       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7088       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7089 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7090                                                  struct itimerspec *host_its)
7091 {
7092     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7093                                                        it_interval),
7094                                 &host_its->it_interval) ||
7095         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7096                                                        it_value),
7097                                 &host_its->it_value)) {
7098         return -TARGET_EFAULT;
7099     }
7100     return 0;
7101 }
7102 #endif
7103 
7104 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7105       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7106       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7107 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7108                                                    struct itimerspec *host_its)
7109 {
7110     if (host_to_target_timespec64(target_addr +
7111                                   offsetof(struct target__kernel_itimerspec,
7112                                            it_interval),
7113                                   &host_its->it_interval) ||
7114         host_to_target_timespec64(target_addr +
7115                                   offsetof(struct target__kernel_itimerspec,
7116                                            it_value),
7117                                   &host_its->it_value)) {
7118         return -TARGET_EFAULT;
7119     }
7120     return 0;
7121 }
7122 #endif
7123 
7124 #if defined(TARGET_NR_adjtimex) || \
7125     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7126 static inline abi_long target_to_host_timex(struct timex *host_tx,
7127                                             abi_long target_addr)
7128 {
7129     struct target_timex *target_tx;
7130 
7131     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7132         return -TARGET_EFAULT;
7133     }
7134 
7135     __get_user(host_tx->modes, &target_tx->modes);
7136     __get_user(host_tx->offset, &target_tx->offset);
7137     __get_user(host_tx->freq, &target_tx->freq);
7138     __get_user(host_tx->maxerror, &target_tx->maxerror);
7139     __get_user(host_tx->esterror, &target_tx->esterror);
7140     __get_user(host_tx->status, &target_tx->status);
7141     __get_user(host_tx->constant, &target_tx->constant);
7142     __get_user(host_tx->precision, &target_tx->precision);
7143     __get_user(host_tx->tolerance, &target_tx->tolerance);
7144     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7145     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7146     __get_user(host_tx->tick, &target_tx->tick);
7147     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7148     __get_user(host_tx->jitter, &target_tx->jitter);
7149     __get_user(host_tx->shift, &target_tx->shift);
7150     __get_user(host_tx->stabil, &target_tx->stabil);
7151     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7152     __get_user(host_tx->calcnt, &target_tx->calcnt);
7153     __get_user(host_tx->errcnt, &target_tx->errcnt);
7154     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7155     __get_user(host_tx->tai, &target_tx->tai);
7156 
7157     unlock_user_struct(target_tx, target_addr, 0);
7158     return 0;
7159 }
7160 
7161 static inline abi_long host_to_target_timex(abi_long target_addr,
7162                                             struct timex *host_tx)
7163 {
7164     struct target_timex *target_tx;
7165 
7166     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7167         return -TARGET_EFAULT;
7168     }
7169 
7170     __put_user(host_tx->modes, &target_tx->modes);
7171     __put_user(host_tx->offset, &target_tx->offset);
7172     __put_user(host_tx->freq, &target_tx->freq);
7173     __put_user(host_tx->maxerror, &target_tx->maxerror);
7174     __put_user(host_tx->esterror, &target_tx->esterror);
7175     __put_user(host_tx->status, &target_tx->status);
7176     __put_user(host_tx->constant, &target_tx->constant);
7177     __put_user(host_tx->precision, &target_tx->precision);
7178     __put_user(host_tx->tolerance, &target_tx->tolerance);
7179     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7180     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7181     __put_user(host_tx->tick, &target_tx->tick);
7182     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7183     __put_user(host_tx->jitter, &target_tx->jitter);
7184     __put_user(host_tx->shift, &target_tx->shift);
7185     __put_user(host_tx->stabil, &target_tx->stabil);
7186     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7187     __put_user(host_tx->calcnt, &target_tx->calcnt);
7188     __put_user(host_tx->errcnt, &target_tx->errcnt);
7189     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7190     __put_user(host_tx->tai, &target_tx->tai);
7191 
7192     unlock_user_struct(target_tx, target_addr, 1);
7193     return 0;
7194 }
7195 #endif
7196 
7197 
7198 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7199 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7200                                               abi_long target_addr)
7201 {
7202     struct target__kernel_timex *target_tx;
7203 
7204     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7205                                  offsetof(struct target__kernel_timex,
7206                                           time))) {
7207         return -TARGET_EFAULT;
7208     }
7209 
7210     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7211         return -TARGET_EFAULT;
7212     }
7213 
7214     __get_user(host_tx->modes, &target_tx->modes);
7215     __get_user(host_tx->offset, &target_tx->offset);
7216     __get_user(host_tx->freq, &target_tx->freq);
7217     __get_user(host_tx->maxerror, &target_tx->maxerror);
7218     __get_user(host_tx->esterror, &target_tx->esterror);
7219     __get_user(host_tx->status, &target_tx->status);
7220     __get_user(host_tx->constant, &target_tx->constant);
7221     __get_user(host_tx->precision, &target_tx->precision);
7222     __get_user(host_tx->tolerance, &target_tx->tolerance);
7223     __get_user(host_tx->tick, &target_tx->tick);
7224     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7225     __get_user(host_tx->jitter, &target_tx->jitter);
7226     __get_user(host_tx->shift, &target_tx->shift);
7227     __get_user(host_tx->stabil, &target_tx->stabil);
7228     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7229     __get_user(host_tx->calcnt, &target_tx->calcnt);
7230     __get_user(host_tx->errcnt, &target_tx->errcnt);
7231     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7232     __get_user(host_tx->tai, &target_tx->tai);
7233 
7234     unlock_user_struct(target_tx, target_addr, 0);
7235     return 0;
7236 }
7237 
7238 static inline abi_long host_to_target_timex64(abi_long target_addr,
7239                                               struct timex *host_tx)
7240 {
7241     struct target__kernel_timex *target_tx;
7242 
7243     if (copy_to_user_timeval64(target_addr +
7244                                offsetof(struct target__kernel_timex, time),
7245                                &host_tx->time)) {
7246         return -TARGET_EFAULT;
7247     }
7248 
7249     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7250         return -TARGET_EFAULT;
7251     }
7252 
7253     __put_user(host_tx->modes, &target_tx->modes);
7254     __put_user(host_tx->offset, &target_tx->offset);
7255     __put_user(host_tx->freq, &target_tx->freq);
7256     __put_user(host_tx->maxerror, &target_tx->maxerror);
7257     __put_user(host_tx->esterror, &target_tx->esterror);
7258     __put_user(host_tx->status, &target_tx->status);
7259     __put_user(host_tx->constant, &target_tx->constant);
7260     __put_user(host_tx->precision, &target_tx->precision);
7261     __put_user(host_tx->tolerance, &target_tx->tolerance);
7262     __put_user(host_tx->tick, &target_tx->tick);
7263     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7264     __put_user(host_tx->jitter, &target_tx->jitter);
7265     __put_user(host_tx->shift, &target_tx->shift);
7266     __put_user(host_tx->stabil, &target_tx->stabil);
7267     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7268     __put_user(host_tx->calcnt, &target_tx->calcnt);
7269     __put_user(host_tx->errcnt, &target_tx->errcnt);
7270     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7271     __put_user(host_tx->tai, &target_tx->tai);
7272 
7273     unlock_user_struct(target_tx, target_addr, 1);
7274     return 0;
7275 }
7276 #endif
7277 
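/* Older libcs do not expose sigev_notify_thread_id; use the raw union member. */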
7278 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7279 #define sigev_notify_thread_id _sigev_un._tid
7280 #endif
7281 
7282 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7283                                                abi_ulong target_addr)
7284 {
7285     struct target_sigevent *target_sevp;
7286 
7287     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7288         return -TARGET_EFAULT;
7289     }
7290 
7291     /* This union is awkward on 64-bit systems because it has a 32-bit
7292      * integer and a pointer in it; we follow the conversion approach
7293      * used for handling sigval types in signal.c so the guest should get
7294      * the correct value back even if we did a 64 bit byteswap and it's
7295      * using the 32 bit integer.
7296      */
7297     host_sevp->sigev_value.sival_ptr =
7298         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7299     host_sevp->sigev_signo =
7300         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7301     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7302     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7303 
7304     unlock_user_struct(target_sevp, target_addr, 1);
7305     return 0;
7306 }
7307 
7308 #if defined(TARGET_NR_mlockall)
7309 static inline int target_to_host_mlockall_arg(int arg)
7310 {
7311     int result = 0;
7312 
7313     if (arg & TARGET_MCL_CURRENT) {
7314         result |= MCL_CURRENT;
7315     }
7316     if (arg & TARGET_MCL_FUTURE) {
7317         result |= MCL_FUTURE;
7318     }
7319 #ifdef MCL_ONFAULT
7320     if (arg & TARGET_MCL_ONFAULT) {
7321         result |= MCL_ONFAULT;
7322     }
7323 #endif
7324 
7325     return result;
7326 }
7327 #endif
7328 
7329 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7330      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7331      defined(TARGET_NR_newfstatat))
7332 static inline abi_long host_to_target_stat64(void *cpu_env,
7333                                              abi_ulong target_addr,
7334                                              struct stat *host_st)
7335 {
7336 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7337     if (((CPUARMState *)cpu_env)->eabi) {
7338         struct target_eabi_stat64 *target_st;
7339 
7340         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7341             return -TARGET_EFAULT;
7342         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7343         __put_user(host_st->st_dev, &target_st->st_dev);
7344         __put_user(host_st->st_ino, &target_st->st_ino);
7345 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7346         __put_user(host_st->st_ino, &target_st->__st_ino);
7347 #endif
7348         __put_user(host_st->st_mode, &target_st->st_mode);
7349         __put_user(host_st->st_nlink, &target_st->st_nlink);
7350         __put_user(host_st->st_uid, &target_st->st_uid);
7351         __put_user(host_st->st_gid, &target_st->st_gid);
7352         __put_user(host_st->st_rdev, &target_st->st_rdev);
7353         __put_user(host_st->st_size, &target_st->st_size);
7354         __put_user(host_st->st_blksize, &target_st->st_blksize);
7355         __put_user(host_st->st_blocks, &target_st->st_blocks);
7356         __put_user(host_st->st_atime, &target_st->target_st_atime);
7357         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7358         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7359 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7360         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7361         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7362         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7363 #endif
7364         unlock_user_struct(target_st, target_addr, 1);
7365     } else
7366 #endif
7367     {
7368 #if defined(TARGET_HAS_STRUCT_STAT64)
7369         struct target_stat64 *target_st;
7370 #else
7371         struct target_stat *target_st;
7372 #endif
7373 
7374         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7375             return -TARGET_EFAULT;
7376         memset(target_st, 0, sizeof(*target_st));
7377         __put_user(host_st->st_dev, &target_st->st_dev);
7378         __put_user(host_st->st_ino, &target_st->st_ino);
7379 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7380         __put_user(host_st->st_ino, &target_st->__st_ino);
7381 #endif
7382         __put_user(host_st->st_mode, &target_st->st_mode);
7383         __put_user(host_st->st_nlink, &target_st->st_nlink);
7384         __put_user(host_st->st_uid, &target_st->st_uid);
7385         __put_user(host_st->st_gid, &target_st->st_gid);
7386         __put_user(host_st->st_rdev, &target_st->st_rdev);
7387         /* XXX: better use of kernel struct */
7388         __put_user(host_st->st_size, &target_st->st_size);
7389         __put_user(host_st->st_blksize, &target_st->st_blksize);
7390         __put_user(host_st->st_blocks, &target_st->st_blocks);
7391         __put_user(host_st->st_atime, &target_st->target_st_atime);
7392         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7393         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7394 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7395         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7396         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7397         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7398 #endif
7399         unlock_user_struct(target_st, target_addr, 1);
7400     }
7401 
7402     return 0;
7403 }
7404 #endif
7405 
7406 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7407 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7408                                             abi_ulong target_addr)
7409 {
7410     struct target_statx *target_stx;
7411 
7412     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7413         return -TARGET_EFAULT;
7414     }
7415     memset(target_stx, 0, sizeof(*target_stx));
7416 
7417     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7418     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7419     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7420     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7421     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7422     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7423     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7424     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7425     __put_user(host_stx->stx_size, &target_stx->stx_size);
7426     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7427     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7428     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7429     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7430     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7431     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7432     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7433     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7434     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7435     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7436     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7437     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7438     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7439     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7440 
7441     unlock_user_struct(target_stx, target_addr, 1);
7442 
7443     return 0;
7444 }
7445 #endif
7446 
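/*
 * Pick the host futex syscall: 64-bit hosts only need __NR_futex (time_t is
 * already 64-bit there); 32-bit hosts prefer __NR_futex_time64 when the libc
 * timespec carries a 64-bit tv_sec, and otherwise fall back to __NR_futex.
 */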
7447 static int do_sys_futex(int *uaddr, int op, int val,
7448                          const struct timespec *timeout, int *uaddr2,
7449                          int val3)
7450 {
7451 #if HOST_LONG_BITS == 64
7452 #if defined(__NR_futex)
7453     /* time_t is always 64-bit on these hosts, so there is no _time64 variant */
7454     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7455 
7456 #endif
7457 #else /* HOST_LONG_BITS == 64 */
7458 #if defined(__NR_futex_time64)
7459     if (sizeof(timeout->tv_sec) == 8) {
7460         /* _time64 function on 32bit arch */
7461         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7462     }
7463 #endif
7464 #if defined(__NR_futex)
7465     /* old function on 32bit arch */
7466     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7467 #endif
7468 #endif /* HOST_LONG_BITS == 64 */
7469     g_assert_not_reached();
7470 }
7471 
7472 static int do_safe_futex(int *uaddr, int op, int val,
7473                          const struct timespec *timeout, int *uaddr2,
7474                          int val3)
7475 {
7476 #if HOST_LONG_BITS == 64
7477 #if defined(__NR_futex)
7478     /* always a 64-bit time_t, it doesn't define _time64 version  */
7479     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7480 #endif
7481 #else /* HOST_LONG_BITS == 64 */
7482 #if defined(__NR_futex_time64)
7483     if (sizeof(timeout->tv_sec) == 8) {
7484         /* _time64 function on 32bit arch */
7485         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7486                                            val3));
7487     }
7488 #endif
7489 #if defined(__NR_futex)
7490     /* old function on 32bit arch */
7491     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7492 #endif
7493 #endif /* HOST_LONG_BITS == 64 */
7494     return -TARGET_ENOSYS;
7495 }
7496 
7497 /* ??? Using host futex calls even when target atomic operations
7498    are not really atomic probably breaks things.  However, implementing
7499    futexes locally would make futexes shared between multiple processes
7500    tricky.  Then again, they're probably useless anyway because guest
7501    atomic operations won't work either.  */
7502 #if defined(TARGET_NR_futex)
7503 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7504                     target_ulong timeout, target_ulong uaddr2, int val3)
7505 {
7506     struct timespec ts, *pts;
7507     int base_op;
7508 
7509     /* ??? We assume FUTEX_* constants are the same on both host
7510        and target.  */
7511 #ifdef FUTEX_CMD_MASK
7512     base_op = op & FUTEX_CMD_MASK;
7513 #else
7514     base_op = op;
7515 #endif
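    /*
     * base_op has flag bits such as FUTEX_PRIVATE_FLAG and
     * FUTEX_CLOCK_REALTIME masked off; the unmodified op is still passed to
     * the host so those flags take effect there.
     */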
7516     switch (base_op) {
7517     case FUTEX_WAIT:
7518     case FUTEX_WAIT_BITSET:
7519         if (timeout) {
7520             pts = &ts;
7521             target_to_host_timespec(pts, timeout);
7522         } else {
7523             pts = NULL;
7524         }
7525         return do_safe_futex(g2h(cpu, uaddr),
7526                              op, tswap32(val), pts, NULL, val3);
7527     case FUTEX_WAKE:
7528         return do_safe_futex(g2h(cpu, uaddr),
7529                              op, val, NULL, NULL, 0);
7530     case FUTEX_FD:
7531         return do_safe_futex(g2h(cpu, uaddr),
7532                              op, val, NULL, NULL, 0);
7533     case FUTEX_REQUEUE:
7534     case FUTEX_CMP_REQUEUE:
7535     case FUTEX_WAKE_OP:
7536         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7537            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7538            But the prototype takes a `struct timespec *'; insert casts
7539            to satisfy the compiler.  We do not need to tswap TIMEOUT
7540            since it's not compared to guest memory.  */
7541         pts = (struct timespec *)(uintptr_t) timeout;
7542         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7543                              (base_op == FUTEX_CMP_REQUEUE
7544                               ? tswap32(val3) : val3));
7545     default:
7546         return -TARGET_ENOSYS;
7547     }
7548 }
7549 #endif
7550 
7551 #if defined(TARGET_NR_futex_time64)
7552 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7553                            int val, target_ulong timeout,
7554                            target_ulong uaddr2, int val3)
7555 {
7556     struct timespec ts, *pts;
7557     int base_op;
7558 
7559     /* ??? We assume FUTEX_* constants are the same on both host
7560        and target.  */
7561 #ifdef FUTEX_CMD_MASK
7562     base_op = op & FUTEX_CMD_MASK;
7563 #else
7564     base_op = op;
7565 #endif
7566     switch (base_op) {
7567     case FUTEX_WAIT:
7568     case FUTEX_WAIT_BITSET:
7569         if (timeout) {
7570             pts = &ts;
7571             if (target_to_host_timespec64(pts, timeout)) {
7572                 return -TARGET_EFAULT;
7573             }
7574         } else {
7575             pts = NULL;
7576         }
7577         return do_safe_futex(g2h(cpu, uaddr), op,
7578                              tswap32(val), pts, NULL, val3);
7579     case FUTEX_WAKE:
7580         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7581     case FUTEX_FD:
7582         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7583     case FUTEX_REQUEUE:
7584     case FUTEX_CMP_REQUEUE:
7585     case FUTEX_WAKE_OP:
7586         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7587            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7588            But the prototype takes a `struct timespec *'; insert casts
7589            to satisfy the compiler.  We do not need to tswap TIMEOUT
7590            since it's not compared to guest memory.  */
7591         pts = (struct timespec *)(uintptr_t) timeout;
7592         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7593                              (base_op == FUTEX_CMP_REQUEUE
7594                               ? tswap32(val3) : val3));
7595     default:
7596         return -TARGET_ENOSYS;
7597     }
7598 }
7599 #endif
7600 
7601 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7602 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7603                                      abi_long handle, abi_long mount_id,
7604                                      abi_long flags)
7605 {
7606     struct file_handle *target_fh;
7607     struct file_handle *fh;
7608     int mid = 0;
7609     abi_long ret;
7610     char *name;
7611     unsigned int size, total_size;
7612 
7613     if (get_user_s32(size, handle)) {
7614         return -TARGET_EFAULT;
7615     }
7616 
7617     name = lock_user_string(pathname);
7618     if (!name) {
7619         return -TARGET_EFAULT;
7620     }
7621 
7622     total_size = sizeof(struct file_handle) + size;
7623     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7624     if (!target_fh) {
7625         unlock_user(name, pathname, 0);
7626         return -TARGET_EFAULT;
7627     }
7628 
7629     fh = g_malloc0(total_size);
7630     fh->handle_bytes = size;
7631 
7632     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7633     unlock_user(name, pathname, 0);
7634 
7635     /* man name_to_handle_at(2):
7636      * Other than the use of the handle_bytes field, the caller should treat
7637      * the file_handle structure as an opaque data type
7638      */
7639 
7640     memcpy(target_fh, fh, total_size);
7641     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7642     target_fh->handle_type = tswap32(fh->handle_type);
7643     g_free(fh);
7644     unlock_user(target_fh, handle, total_size);
7645 
7646     if (put_user_s32(mid, mount_id)) {
7647         return -TARGET_EFAULT;
7648     }
7649 
7650     return ret;
7651 
7652 }
7653 #endif
7654 
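/*
 * Emulate open_by_handle_at(): copy the guest's file_handle, fix up the byte
 * order of handle_bytes/handle_type, translate the open flags and hand the
 * result to the host syscall.
 */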
7655 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7656 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7657                                      abi_long flags)
7658 {
7659     struct file_handle *target_fh;
7660     struct file_handle *fh;
7661     unsigned int size, total_size;
7662     abi_long ret;
7663 
7664     if (get_user_s32(size, handle)) {
7665         return -TARGET_EFAULT;
7666     }
7667 
7668     total_size = sizeof(struct file_handle) + size;
7669     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7670     if (!target_fh) {
7671         return -TARGET_EFAULT;
7672     }
7673 
7674     fh = g_memdup(target_fh, total_size);
7675     fh->handle_bytes = size;
7676     fh->handle_type = tswap32(target_fh->handle_type);
7677 
7678     ret = get_errno(open_by_handle_at(mount_fd, fh,
7679                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7680 
7681     g_free(fh);
7682 
7683     unlock_user(target_fh, handle, total_size);
7684 
7685     return ret;
7686 }
7687 #endif
7688 
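/*
 * Emulate signalfd4(): convert the guest signal mask and flags to their host
 * representations, then register the target_signalfd_trans fd translator for
 * data subsequently read back from the descriptor.
 */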
7689 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7690 
7691 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7692 {
7693     int host_flags;
7694     target_sigset_t *target_mask;
7695     sigset_t host_mask;
7696     abi_long ret;
7697 
7698     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7699         return -TARGET_EINVAL;
7700     }
7701     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7702         return -TARGET_EFAULT;
7703     }
7704 
7705     target_to_host_sigset(&host_mask, target_mask);
7706 
7707     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7708 
7709     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7710     if (ret >= 0) {
7711         fd_trans_register(ret, &target_signalfd_trans);
7712     }
7713 
7714     unlock_user_struct(target_mask, mask, 0);
7715 
7716     return ret;
7717 }
7718 #endif
7719 
7720 /* Map host to target signal numbers for the wait family of syscalls.
7721    Assume all other status bits are the same.  */
7722 int host_to_target_waitstatus(int status)
7723 {
7724     if (WIFSIGNALED(status)) {
7725         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7726     }
7727     if (WIFSTOPPED(status)) {
7728         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7729                | (status & 0xff);
7730     }
7731     return status;
7732 }
7733 
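/*
 * Synthesize /proc/self/cmdline from the argv strings saved at exec time:
 * each argument is written NUL-terminated, matching the kernel's format.
 */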
7734 static int open_self_cmdline(void *cpu_env, int fd)
7735 {
7736     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7737     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7738     int i;
7739 
7740     for (i = 0; i < bprm->argc; i++) {
7741         size_t len = strlen(bprm->argv[i]) + 1;
7742 
7743         if (write(fd, bprm->argv[i], len) != len) {
7744             return -1;
7745         }
7746     }
7747 
7748     return 0;
7749 }
7750 
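/*
 * Synthesize /proc/self/maps: walk the host's own mappings, keep only the
 * entries that correspond to valid guest addresses, and print them using
 * guest addresses and guest page protections.
 */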
7751 static int open_self_maps(void *cpu_env, int fd)
7752 {
7753     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7754     TaskState *ts = cpu->opaque;
7755     GSList *map_info = read_self_maps();
7756     GSList *s;
7757     int count;
7758 
7759     for (s = map_info; s; s = g_slist_next(s)) {
7760         MapInfo *e = (MapInfo *) s->data;
7761 
7762         if (h2g_valid(e->start)) {
7763             unsigned long min = e->start;
7764             unsigned long max = e->end;
7765             int flags = page_get_flags(h2g(min));
7766             const char *path;
7767 
7768             max = h2g_valid(max - 1) ?
7769                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7770 
7771             if (page_check_range(h2g(min), max - min, flags) == -1) {
7772                 continue;
7773             }
7774 
7775             if (h2g(min) == ts->info->stack_limit) {
7776                 path = "[stack]";
7777             } else {
7778                 path = e->path;
7779             }
7780 
7781             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7782                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7783                             h2g(min), h2g(max - 1) + 1,
7784                             (flags & PAGE_READ) ? 'r' : '-',
7785                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7786                             (flags & PAGE_EXEC) ? 'x' : '-',
7787                             e->is_priv ? 'p' : '-',
7788                             (uint64_t) e->offset, e->dev, e->inode);
7789             if (path) {
7790                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7791             } else {
7792                 dprintf(fd, "\n");
7793             }
7794         }
7795     }
7796 
7797     free_self_maps(map_info);
7798 
7799 #ifdef TARGET_VSYSCALL_PAGE
7800     /*
7801      * We only support execution from the vsyscall page.
7802      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7803      */
7804     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7805                     " --xp 00000000 00:00 0",
7806                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7807     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7808 #endif
7809 
7810     return 0;
7811 }
7812 
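/*
 * Synthesize /proc/self/stat.  Only the pid, comm, ppid and start-of-stack
 * fields are filled in; every other field is reported as zero.
 */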
7813 static int open_self_stat(void *cpu_env, int fd)
7814 {
7815     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7816     TaskState *ts = cpu->opaque;
7817     g_autoptr(GString) buf = g_string_new(NULL);
7818     int i;
7819 
7820     for (i = 0; i < 44; i++) {
7821         if (i == 0) {
7822             /* pid */
7823             g_string_printf(buf, FMT_pid " ", getpid());
7824         } else if (i == 1) {
7825             /* app name */
7826             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7827             bin = bin ? bin + 1 : ts->bprm->argv[0];
7828             g_string_printf(buf, "(%.15s) ", bin);
7829         } else if (i == 3) {
7830             /* ppid */
7831             g_string_printf(buf, FMT_pid " ", getppid());
7832         } else if (i == 27) {
7833             /* stack bottom */
7834             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7835         } else {
7836             /* all other fields are reported as zero */
7837             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7838         }
7839 
7840         if (write(fd, buf->str, buf->len) != buf->len) {
7841             return -1;
7842         }
7843     }
7844 
7845     return 0;
7846 }
7847 
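/*
 * Synthesize /proc/self/auxv by copying the auxiliary vector that was placed
 * on the guest stack at exec time.
 */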
7848 static int open_self_auxv(void *cpu_env, int fd)
7849 {
7850     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7851     TaskState *ts = cpu->opaque;
7852     abi_ulong auxv = ts->info->saved_auxv;
7853     abi_ulong len = ts->info->auxv_len;
7854     char *ptr;
7855 
7856     /*
7857      * The auxiliary vector is stored on the target process stack;
7858      * read in the whole vector and copy it to the file.
7859      */
7860     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7861     if (ptr != NULL) {
7862         while (len > 0) {
7863             ssize_t r;
7864             r = write(fd, ptr, len);
7865             if (r <= 0) {
7866                 break;
7867             }
7868             len -= r;
7869             ptr += r;
7870         }
7871         lseek(fd, 0, SEEK_SET);
7872         unlock_user(ptr, auxv, len);
7873     }
7874 
7875     return 0;
7876 }
7877 
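/*
 * Return 1 if filename names the given /proc entry for the current process,
 * i.e. "/proc/self/<entry>" or "/proc/<pid>/<entry>" where <pid> is our own.
 */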
7878 static int is_proc_myself(const char *filename, const char *entry)
7879 {
7880     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7881         filename += strlen("/proc/");
7882         if (!strncmp(filename, "self/", strlen("self/"))) {
7883             filename += strlen("self/");
7884         } else if (*filename >= '1' && *filename <= '9') {
7885             char myself[80];
7886             snprintf(myself, sizeof(myself), "%d/", getpid());
7887             if (!strncmp(filename, myself, strlen(myself))) {
7888                 filename += strlen(myself);
7889             } else {
7890                 return 0;
7891             }
7892         } else {
7893             return 0;
7894         }
7895         if (!strcmp(filename, entry)) {
7896             return 1;
7897         }
7898     }
7899     return 0;
7900 }
7901 
7902 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7903     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7904 static int is_proc(const char *filename, const char *entry)
7905 {
7906     return strcmp(filename, entry) == 0;
7907 }
7908 #endif
7909 
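/*
 * When host and guest endianness differ, /proc/net/route cannot simply be
 * passed through: the destination, gateway and mask columns are hex dumps
 * whose byte order the guest expects to match its own, so each route is
 * re-emitted with those three fields byte-swapped.
 */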
7910 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7911 static int open_net_route(void *cpu_env, int fd)
7912 {
7913     FILE *fp;
7914     char *line = NULL;
7915     size_t len = 0;
7916     ssize_t read;
7917 
7918     fp = fopen("/proc/net/route", "r");
7919     if (fp == NULL) {
7920         return -1;
7921     }
7922 
7923     /* read header */
7924 
7925     read = getline(&line, &len, fp);
7926     dprintf(fd, "%s", line);
7927 
7928     /* read routes */
7929 
7930     while ((read = getline(&line, &len, fp)) != -1) {
7931         char iface[16];
7932         uint32_t dest, gw, mask;
7933         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7934         int fields;
7935 
7936         fields = sscanf(line,
7937                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7938                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7939                         &mask, &mtu, &window, &irtt);
7940         if (fields != 11) {
7941             continue;
7942         }
7943         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7944                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7945                 metric, tswap32(mask), mtu, window, irtt);
7946     }
7947 
7948     free(line);
7949     fclose(fp);
7950 
7951     return 0;
7952 }
7953 #endif
7954 
7955 #if defined(TARGET_SPARC)
7956 static int open_cpuinfo(void *cpu_env, int fd)
7957 {
7958     dprintf(fd, "type\t\t: sun4u\n");
7959     return 0;
7960 }
7961 #endif
7962 
7963 #if defined(TARGET_HPPA)
7964 static int open_cpuinfo(void *cpu_env, int fd)
7965 {
7966     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7967     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7968     dprintf(fd, "capabilities\t: os32\n");
7969     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7970     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7971     return 0;
7972 }
7973 #endif
7974 
7975 #if defined(TARGET_M68K)
7976 static int open_hardware(void *cpu_env, int fd)
7977 {
7978     dprintf(fd, "Model:\t\tqemu-m68k\n");
7979     return 0;
7980 }
7981 #endif
7982 
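/*
 * openat() helper: a few /proc files cannot be passed through to the host
 * (their contents must describe the guest, or need byte swapping), so they
 * are synthesized into an unlinked temporary file instead; everything else
 * is forwarded to safe_openat().
 */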
7983 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7984 {
7985     struct fake_open {
7986         const char *filename;
7987         int (*fill)(void *cpu_env, int fd);
7988         int (*cmp)(const char *s1, const char *s2);
7989     };
7990     const struct fake_open *fake_open;
7991     static const struct fake_open fakes[] = {
7992         { "maps", open_self_maps, is_proc_myself },
7993         { "stat", open_self_stat, is_proc_myself },
7994         { "auxv", open_self_auxv, is_proc_myself },
7995         { "cmdline", open_self_cmdline, is_proc_myself },
7996 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7997         { "/proc/net/route", open_net_route, is_proc },
7998 #endif
7999 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8000         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8001 #endif
8002 #if defined(TARGET_M68K)
8003         { "/proc/hardware", open_hardware, is_proc },
8004 #endif
8005         { NULL, NULL, NULL }
8006     };
8007 
8008     if (is_proc_myself(pathname, "exe")) {
8009         int execfd = qemu_getauxval(AT_EXECFD);
8010         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8011     }
8012 
8013     for (fake_open = fakes; fake_open->filename; fake_open++) {
8014         if (fake_open->cmp(pathname, fake_open->filename)) {
8015             break;
8016         }
8017     }
8018 
8019     if (fake_open->filename) {
8020         const char *tmpdir;
8021         char filename[PATH_MAX];
8022         int fd, r;
8023 
8024         /* create a temporary file to hold the synthesized contents */
8025         tmpdir = getenv("TMPDIR");
8026         if (!tmpdir)
8027             tmpdir = "/tmp";
8028         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8029         fd = mkstemp(filename);
8030         if (fd < 0) {
8031             return fd;
8032         }
8033         unlink(filename);
8034 
8035         if ((r = fake_open->fill(cpu_env, fd))) {
8036             int e = errno;
8037             close(fd);
8038             errno = e;
8039             return r;
8040         }
8041         lseek(fd, 0, SEEK_SET);
8042 
8043         return fd;
8044     }
8045 
8046     return safe_openat(dirfd, path(pathname), flags, mode);
8047 }
8048 
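/*
 * Guest-visible POSIX timer IDs carry TIMER_MAGIC in their upper 16 bits and
 * an index into g_posix_timers in the lower 16 bits; for example the timer
 * at index 3 is exposed to the guest as 0x0caf0003.
 */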
8049 #define TIMER_MAGIC 0x0caf0000
8050 #define TIMER_MAGIC_MASK 0xffff0000
8051 
8052 /* Convert QEMU provided timer ID back to internal 16bit index format */
8053 static target_timer_t get_timer_id(abi_long arg)
8054 {
8055     target_timer_t timerid = arg;
8056 
8057     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8058         return -TARGET_EINVAL;
8059     }
8060 
8061     timerid &= 0xffff;
8062 
8063     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8064         return -TARGET_EINVAL;
8065     }
8066 
8067     return timerid;
8068 }
8069 
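/*
 * CPU affinity mask helpers: copy a CPU set bit by bit between the guest's
 * abi_ulong array layout and the host's unsigned long array layout, in
 * either direction.
 */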
8070 static int target_to_host_cpu_mask(unsigned long *host_mask,
8071                                    size_t host_size,
8072                                    abi_ulong target_addr,
8073                                    size_t target_size)
8074 {
8075     unsigned target_bits = sizeof(abi_ulong) * 8;
8076     unsigned host_bits = sizeof(*host_mask) * 8;
8077     abi_ulong *target_mask;
8078     unsigned i, j;
8079 
8080     assert(host_size >= target_size);
8081 
8082     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8083     if (!target_mask) {
8084         return -TARGET_EFAULT;
8085     }
8086     memset(host_mask, 0, host_size);
8087 
8088     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8089         unsigned bit = i * target_bits;
8090         abi_ulong val;
8091 
8092         __get_user(val, &target_mask[i]);
8093         for (j = 0; j < target_bits; j++, bit++) {
8094             if (val & (1UL << j)) {
8095                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8096             }
8097         }
8098     }
8099 
8100     unlock_user(target_mask, target_addr, 0);
8101     return 0;
8102 }
8103 
8104 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8105                                    size_t host_size,
8106                                    abi_ulong target_addr,
8107                                    size_t target_size)
8108 {
8109     unsigned target_bits = sizeof(abi_ulong) * 8;
8110     unsigned host_bits = sizeof(*host_mask) * 8;
8111     abi_ulong *target_mask;
8112     unsigned i, j;
8113 
8114     assert(host_size >= target_size);
8115 
8116     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8117     if (!target_mask) {
8118         return -TARGET_EFAULT;
8119     }
8120 
8121     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8122         unsigned bit = i * target_bits;
8123         abi_ulong val = 0;
8124 
8125         for (j = 0; j < target_bits; j++, bit++) {
8126             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8127                 val |= 1UL << j;
8128             }
8129         }
8130         __put_user(val, &target_mask[i]);
8131     }
8132 
8133     unlock_user(target_mask, target_addr, target_size);
8134     return 0;
8135 }
8136 
8137 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8138 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8139 #endif
8140 
8141 /* This is an internal helper for do_syscall so that it is easier
8142  * to have a single return point, so that actions, such as logging
8143  * of syscall results, can be performed.
8144  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8145  */
8146 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8147                             abi_long arg2, abi_long arg3, abi_long arg4,
8148                             abi_long arg5, abi_long arg6, abi_long arg7,
8149                             abi_long arg8)
8150 {
8151     CPUState *cpu = env_cpu(cpu_env);
8152     abi_long ret;
8153 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8154     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8155     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8156     || defined(TARGET_NR_statx)
8157     struct stat st;
8158 #endif
8159 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8160     || defined(TARGET_NR_fstatfs)
8161     struct statfs stfs;
8162 #endif
8163     void *p;
8164 
8165     switch(num) {
8166     case TARGET_NR_exit:
8167         /* In old applications this may be used to implement _exit(2).
8168            However, in threaded applications it is used for thread termination,
8169            and _exit_group is used for application termination.
8170            Do thread termination if we have more than one thread.  */
8171 
8172         if (block_signals()) {
8173             return -TARGET_ERESTARTSYS;
8174         }
8175 
8176         pthread_mutex_lock(&clone_lock);
8177 
8178         if (CPU_NEXT(first_cpu)) {
8179             TaskState *ts = cpu->opaque;
8180 
8181             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8182             object_unref(OBJECT(cpu));
8183             /*
8184              * At this point the CPU should be unrealized and removed
8185              * from cpu lists. We can clean-up the rest of the thread
8186              * data without the lock held.
8187              */
8188 
8189             pthread_mutex_unlock(&clone_lock);
8190 
8191             if (ts->child_tidptr) {
8192                 put_user_u32(0, ts->child_tidptr);
8193                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8194                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8195             }
8196             thread_cpu = NULL;
8197             g_free(ts);
8198             rcu_unregister_thread();
8199             pthread_exit(NULL);
8200         }
8201 
8202         pthread_mutex_unlock(&clone_lock);
8203         preexit_cleanup(cpu_env, arg1);
8204         _exit(arg1);
8205         return 0; /* avoid warning */
8206     case TARGET_NR_read:
8207         if (arg2 == 0 && arg3 == 0) {
8208             return get_errno(safe_read(arg1, 0, 0));
8209         } else {
8210             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8211                 return -TARGET_EFAULT;
8212             ret = get_errno(safe_read(arg1, p, arg3));
8213             if (ret >= 0 &&
8214                 fd_trans_host_to_target_data(arg1)) {
8215                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8216             }
8217             unlock_user(p, arg2, ret);
8218         }
8219         return ret;
8220     case TARGET_NR_write:
8221         if (arg2 == 0 && arg3 == 0) {
8222             return get_errno(safe_write(arg1, 0, 0));
8223         }
8224         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8225             return -TARGET_EFAULT;
8226         if (fd_trans_target_to_host_data(arg1)) {
8227             void *copy = g_malloc(arg3);
8228             memcpy(copy, p, arg3);
8229             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8230             if (ret >= 0) {
8231                 ret = get_errno(safe_write(arg1, copy, ret));
8232             }
8233             g_free(copy);
8234         } else {
8235             ret = get_errno(safe_write(arg1, p, arg3));
8236         }
8237         unlock_user(p, arg2, 0);
8238         return ret;
8239 
8240 #ifdef TARGET_NR_open
8241     case TARGET_NR_open:
8242         if (!(p = lock_user_string(arg1)))
8243             return -TARGET_EFAULT;
8244         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8245                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8246                                   arg3));
8247         fd_trans_unregister(ret);
8248         unlock_user(p, arg1, 0);
8249         return ret;
8250 #endif
8251     case TARGET_NR_openat:
8252         if (!(p = lock_user_string(arg2)))
8253             return -TARGET_EFAULT;
8254         ret = get_errno(do_openat(cpu_env, arg1, p,
8255                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8256                                   arg4));
8257         fd_trans_unregister(ret);
8258         unlock_user(p, arg2, 0);
8259         return ret;
8260 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8261     case TARGET_NR_name_to_handle_at:
8262         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8263         return ret;
8264 #endif
8265 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8266     case TARGET_NR_open_by_handle_at:
8267         ret = do_open_by_handle_at(arg1, arg2, arg3);
8268         fd_trans_unregister(ret);
8269         return ret;
8270 #endif
8271     case TARGET_NR_close:
8272         fd_trans_unregister(arg1);
8273         return get_errno(close(arg1));
8274 
8275     case TARGET_NR_brk:
8276         return do_brk(arg1);
8277 #ifdef TARGET_NR_fork
8278     case TARGET_NR_fork:
8279         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8280 #endif
8281 #ifdef TARGET_NR_waitpid
8282     case TARGET_NR_waitpid:
8283         {
8284             int status;
8285             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8286             if (!is_error(ret) && arg2 && ret
8287                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8288                 return -TARGET_EFAULT;
8289         }
8290         return ret;
8291 #endif
8292 #ifdef TARGET_NR_waitid
8293     case TARGET_NR_waitid:
8294         {
8295             siginfo_t info;
8296             info.si_pid = 0;
8297             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8298             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8299                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8300                     return -TARGET_EFAULT;
8301                 host_to_target_siginfo(p, &info);
8302                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8303             }
8304         }
8305         return ret;
8306 #endif
8307 #ifdef TARGET_NR_creat /* not on alpha */
8308     case TARGET_NR_creat:
8309         if (!(p = lock_user_string(arg1)))
8310             return -TARGET_EFAULT;
8311         ret = get_errno(creat(p, arg2));
8312         fd_trans_unregister(ret);
8313         unlock_user(p, arg1, 0);
8314         return ret;
8315 #endif
8316 #ifdef TARGET_NR_link
8317     case TARGET_NR_link:
8318         {
8319             void * p2;
8320             p = lock_user_string(arg1);
8321             p2 = lock_user_string(arg2);
8322             if (!p || !p2)
8323                 ret = -TARGET_EFAULT;
8324             else
8325                 ret = get_errno(link(p, p2));
8326             unlock_user(p2, arg2, 0);
8327             unlock_user(p, arg1, 0);
8328         }
8329         return ret;
8330 #endif
8331 #if defined(TARGET_NR_linkat)
8332     case TARGET_NR_linkat:
8333         {
8334             void * p2 = NULL;
8335             if (!arg2 || !arg4)
8336                 return -TARGET_EFAULT;
8337             p  = lock_user_string(arg2);
8338             p2 = lock_user_string(arg4);
8339             if (!p || !p2)
8340                 ret = -TARGET_EFAULT;
8341             else
8342                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8343             unlock_user(p, arg2, 0);
8344             unlock_user(p2, arg4, 0);
8345         }
8346         return ret;
8347 #endif
8348 #ifdef TARGET_NR_unlink
8349     case TARGET_NR_unlink:
8350         if (!(p = lock_user_string(arg1)))
8351             return -TARGET_EFAULT;
8352         ret = get_errno(unlink(p));
8353         unlock_user(p, arg1, 0);
8354         return ret;
8355 #endif
8356 #if defined(TARGET_NR_unlinkat)
8357     case TARGET_NR_unlinkat:
8358         if (!(p = lock_user_string(arg2)))
8359             return -TARGET_EFAULT;
8360         ret = get_errno(unlinkat(arg1, p, arg3));
8361         unlock_user(p, arg2, 0);
8362         return ret;
8363 #endif
8364     case TARGET_NR_execve:
8365         {
8366             char **argp, **envp;
8367             int argc, envc;
8368             abi_ulong gp;
8369             abi_ulong guest_argp;
8370             abi_ulong guest_envp;
8371             abi_ulong addr;
8372             char **q;
8373 
8374             argc = 0;
8375             guest_argp = arg2;
8376             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8377                 if (get_user_ual(addr, gp))
8378                     return -TARGET_EFAULT;
8379                 if (!addr)
8380                     break;
8381                 argc++;
8382             }
8383             envc = 0;
8384             guest_envp = arg3;
8385             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8386                 if (get_user_ual(addr, gp))
8387                     return -TARGET_EFAULT;
8388                 if (!addr)
8389                     break;
8390                 envc++;
8391             }
8392 
8393             argp = g_new0(char *, argc + 1);
8394             envp = g_new0(char *, envc + 1);
8395 
8396             for (gp = guest_argp, q = argp; gp;
8397                   gp += sizeof(abi_ulong), q++) {
8398                 if (get_user_ual(addr, gp))
8399                     goto execve_efault;
8400                 if (!addr)
8401                     break;
8402                 if (!(*q = lock_user_string(addr)))
8403                     goto execve_efault;
8404             }
8405             *q = NULL;
8406 
8407             for (gp = guest_envp, q = envp; gp;
8408                   gp += sizeof(abi_ulong), q++) {
8409                 if (get_user_ual(addr, gp))
8410                     goto execve_efault;
8411                 if (!addr)
8412                     break;
8413                 if (!(*q = lock_user_string(addr)))
8414                     goto execve_efault;
8415             }
8416             *q = NULL;
8417 
8418             if (!(p = lock_user_string(arg1)))
8419                 goto execve_efault;
8420             /* Although execve() is not an interruptible syscall it is
8421              * a special case where we must use the safe_syscall wrapper:
8422              * if we allow a signal to happen before we make the host
8423              * syscall then we will 'lose' it, because at the point of
8424              * execve the process leaves QEMU's control. So we use the
8425              * safe syscall wrapper to ensure that we either take the
8426              * signal as a guest signal, or else it does not happen
8427              * before the execve completes and makes it the other
8428              * program's problem.
8429              */
8430             ret = get_errno(safe_execve(p, argp, envp));
8431             unlock_user(p, arg1, 0);
8432 
8433             goto execve_end;
8434 
8435         execve_efault:
8436             ret = -TARGET_EFAULT;
8437 
8438         execve_end:
8439             for (gp = guest_argp, q = argp; *q;
8440                   gp += sizeof(abi_ulong), q++) {
8441                 if (get_user_ual(addr, gp)
8442                     || !addr)
8443                     break;
8444                 unlock_user(*q, addr, 0);
8445             }
8446             for (gp = guest_envp, q = envp; *q;
8447                   gp += sizeof(abi_ulong), q++) {
8448                 if (get_user_ual(addr, gp)
8449                     || !addr)
8450                     break;
8451                 unlock_user(*q, addr, 0);
8452             }
8453 
8454             g_free(argp);
8455             g_free(envp);
8456         }
8457         return ret;
8458     case TARGET_NR_chdir:
8459         if (!(p = lock_user_string(arg1)))
8460             return -TARGET_EFAULT;
8461         ret = get_errno(chdir(p));
8462         unlock_user(p, arg1, 0);
8463         return ret;
8464 #ifdef TARGET_NR_time
8465     case TARGET_NR_time:
8466         {
8467             time_t host_time;
8468             ret = get_errno(time(&host_time));
8469             if (!is_error(ret)
8470                 && arg1
8471                 && put_user_sal(host_time, arg1))
8472                 return -TARGET_EFAULT;
8473         }
8474         return ret;
8475 #endif
8476 #ifdef TARGET_NR_mknod
8477     case TARGET_NR_mknod:
8478         if (!(p = lock_user_string(arg1)))
8479             return -TARGET_EFAULT;
8480         ret = get_errno(mknod(p, arg2, arg3));
8481         unlock_user(p, arg1, 0);
8482         return ret;
8483 #endif
8484 #if defined(TARGET_NR_mknodat)
8485     case TARGET_NR_mknodat:
8486         if (!(p = lock_user_string(arg2)))
8487             return -TARGET_EFAULT;
8488         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8489         unlock_user(p, arg2, 0);
8490         return ret;
8491 #endif
8492 #ifdef TARGET_NR_chmod
8493     case TARGET_NR_chmod:
8494         if (!(p = lock_user_string(arg1)))
8495             return -TARGET_EFAULT;
8496         ret = get_errno(chmod(p, arg2));
8497         unlock_user(p, arg1, 0);
8498         return ret;
8499 #endif
8500 #ifdef TARGET_NR_lseek
8501     case TARGET_NR_lseek:
8502         return get_errno(lseek(arg1, arg2, arg3));
8503 #endif
8504 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8505     /* Alpha specific */
8506     case TARGET_NR_getxpid:
8507         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8508         return get_errno(getpid());
8509 #endif
8510 #ifdef TARGET_NR_getpid
8511     case TARGET_NR_getpid:
8512         return get_errno(getpid());
8513 #endif
8514     case TARGET_NR_mount:
8515         {
8516             /* need to look at the data field */
8517             void *p2, *p3;
8518 
8519             if (arg1) {
8520                 p = lock_user_string(arg1);
8521                 if (!p) {
8522                     return -TARGET_EFAULT;
8523                 }
8524             } else {
8525                 p = NULL;
8526             }
8527 
8528             p2 = lock_user_string(arg2);
8529             if (!p2) {
8530                 if (arg1) {
8531                     unlock_user(p, arg1, 0);
8532                 }
8533                 return -TARGET_EFAULT;
8534             }
8535 
8536             if (arg3) {
8537                 p3 = lock_user_string(arg3);
8538                 if (!p3) {
8539                     if (arg1) {
8540                         unlock_user(p, arg1, 0);
8541                     }
8542                     unlock_user(p2, arg2, 0);
8543                     return -TARGET_EFAULT;
8544                 }
8545             } else {
8546                 p3 = NULL;
8547             }
8548 
8549             /* FIXME - arg5 should be locked, but it isn't clear how to
8550              * do that since it's not guaranteed to be a NULL-terminated
8551              * string.
8552              */
8553             if (!arg5) {
8554                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8555             } else {
8556                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8557             }
8558             ret = get_errno(ret);
8559 
8560             if (arg1) {
8561                 unlock_user(p, arg1, 0);
8562             }
8563             unlock_user(p2, arg2, 0);
8564             if (arg3) {
8565                 unlock_user(p3, arg3, 0);
8566             }
8567         }
8568         return ret;
8569 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8570 #if defined(TARGET_NR_umount)
8571     case TARGET_NR_umount:
8572 #endif
8573 #if defined(TARGET_NR_oldumount)
8574     case TARGET_NR_oldumount:
8575 #endif
8576         if (!(p = lock_user_string(arg1)))
8577             return -TARGET_EFAULT;
8578         ret = get_errno(umount(p));
8579         unlock_user(p, arg1, 0);
8580         return ret;
8581 #endif
8582 #ifdef TARGET_NR_stime /* not on alpha */
8583     case TARGET_NR_stime:
8584         {
8585             struct timespec ts;
8586             ts.tv_nsec = 0;
8587             if (get_user_sal(ts.tv_sec, arg1)) {
8588                 return -TARGET_EFAULT;
8589             }
8590             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8591         }
8592 #endif
8593 #ifdef TARGET_NR_alarm /* not on alpha */
8594     case TARGET_NR_alarm:
8595         return alarm(arg1);
8596 #endif
8597 #ifdef TARGET_NR_pause /* not on alpha */
8598     case TARGET_NR_pause:
8599         if (!block_signals()) {
8600             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8601         }
8602         return -TARGET_EINTR;
8603 #endif
8604 #ifdef TARGET_NR_utime
8605     case TARGET_NR_utime:
8606         {
8607             struct utimbuf tbuf, *host_tbuf;
8608             struct target_utimbuf *target_tbuf;
8609             if (arg2) {
8610                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8611                     return -TARGET_EFAULT;
8612                 tbuf.actime = tswapal(target_tbuf->actime);
8613                 tbuf.modtime = tswapal(target_tbuf->modtime);
8614                 unlock_user_struct(target_tbuf, arg2, 0);
8615                 host_tbuf = &tbuf;
8616             } else {
8617                 host_tbuf = NULL;
8618             }
8619             if (!(p = lock_user_string(arg1)))
8620                 return -TARGET_EFAULT;
8621             ret = get_errno(utime(p, host_tbuf));
8622             unlock_user(p, arg1, 0);
8623         }
8624         return ret;
8625 #endif
8626 #ifdef TARGET_NR_utimes
8627     case TARGET_NR_utimes:
8628         {
8629             struct timeval *tvp, tv[2];
8630             if (arg2) {
8631                 if (copy_from_user_timeval(&tv[0], arg2)
8632                     || copy_from_user_timeval(&tv[1],
8633                                               arg2 + sizeof(struct target_timeval)))
8634                     return -TARGET_EFAULT;
8635                 tvp = tv;
8636             } else {
8637                 tvp = NULL;
8638             }
8639             if (!(p = lock_user_string(arg1)))
8640                 return -TARGET_EFAULT;
8641             ret = get_errno(utimes(p, tvp));
8642             unlock_user(p, arg1, 0);
8643         }
8644         return ret;
8645 #endif
8646 #if defined(TARGET_NR_futimesat)
8647     case TARGET_NR_futimesat:
8648         {
8649             struct timeval *tvp, tv[2];
8650             if (arg3) {
8651                 if (copy_from_user_timeval(&tv[0], arg3)
8652                     || copy_from_user_timeval(&tv[1],
8653                                               arg3 + sizeof(struct target_timeval)))
8654                     return -TARGET_EFAULT;
8655                 tvp = tv;
8656             } else {
8657                 tvp = NULL;
8658             }
8659             if (!(p = lock_user_string(arg2))) {
8660                 return -TARGET_EFAULT;
8661             }
8662             ret = get_errno(futimesat(arg1, path(p), tvp));
8663             unlock_user(p, arg2, 0);
8664         }
8665         return ret;
8666 #endif
8667 #ifdef TARGET_NR_access
8668     case TARGET_NR_access:
8669         if (!(p = lock_user_string(arg1))) {
8670             return -TARGET_EFAULT;
8671         }
8672         ret = get_errno(access(path(p), arg2));
8673         unlock_user(p, arg1, 0);
8674         return ret;
8675 #endif
8676 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8677     case TARGET_NR_faccessat:
8678         if (!(p = lock_user_string(arg2))) {
8679             return -TARGET_EFAULT;
8680         }
8681         ret = get_errno(faccessat(arg1, p, arg3, 0));
8682         unlock_user(p, arg2, 0);
8683         return ret;
8684 #endif
8685 #ifdef TARGET_NR_nice /* not on alpha */
8686     case TARGET_NR_nice:
8687         return get_errno(nice(arg1));
8688 #endif
8689     case TARGET_NR_sync:
8690         sync();
8691         return 0;
8692 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8693     case TARGET_NR_syncfs:
8694         return get_errno(syncfs(arg1));
8695 #endif
8696     case TARGET_NR_kill:
8697         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8698 #ifdef TARGET_NR_rename
8699     case TARGET_NR_rename:
8700         {
8701             void *p2;
8702             p = lock_user_string(arg1);
8703             p2 = lock_user_string(arg2);
8704             if (!p || !p2)
8705                 ret = -TARGET_EFAULT;
8706             else
8707                 ret = get_errno(rename(p, p2));
8708             unlock_user(p2, arg2, 0);
8709             unlock_user(p, arg1, 0);
8710         }
8711         return ret;
8712 #endif
8713 #if defined(TARGET_NR_renameat)
8714     case TARGET_NR_renameat:
8715         {
8716             void *p2;
8717             p  = lock_user_string(arg2);
8718             p2 = lock_user_string(arg4);
8719             if (!p || !p2)
8720                 ret = -TARGET_EFAULT;
8721             else
8722                 ret = get_errno(renameat(arg1, p, arg3, p2));
8723             unlock_user(p2, arg4, 0);
8724             unlock_user(p, arg2, 0);
8725         }
8726         return ret;
8727 #endif
8728 #if defined(TARGET_NR_renameat2)
8729     case TARGET_NR_renameat2:
8730         {
8731             void *p2;
8732             p  = lock_user_string(arg2);
8733             p2 = lock_user_string(arg4);
8734             if (!p || !p2) {
8735                 ret = -TARGET_EFAULT;
8736             } else {
8737                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8738             }
8739             unlock_user(p2, arg4, 0);
8740             unlock_user(p, arg2, 0);
8741         }
8742         return ret;
8743 #endif
8744 #ifdef TARGET_NR_mkdir
8745     case TARGET_NR_mkdir:
8746         if (!(p = lock_user_string(arg1)))
8747             return -TARGET_EFAULT;
8748         ret = get_errno(mkdir(p, arg2));
8749         unlock_user(p, arg1, 0);
8750         return ret;
8751 #endif
8752 #if defined(TARGET_NR_mkdirat)
8753     case TARGET_NR_mkdirat:
8754         if (!(p = lock_user_string(arg2)))
8755             return -TARGET_EFAULT;
8756         ret = get_errno(mkdirat(arg1, p, arg3));
8757         unlock_user(p, arg2, 0);
8758         return ret;
8759 #endif
8760 #ifdef TARGET_NR_rmdir
8761     case TARGET_NR_rmdir:
8762         if (!(p = lock_user_string(arg1)))
8763             return -TARGET_EFAULT;
8764         ret = get_errno(rmdir(p));
8765         unlock_user(p, arg1, 0);
8766         return ret;
8767 #endif
8768     case TARGET_NR_dup:
8769         ret = get_errno(dup(arg1));
8770         if (ret >= 0) {
8771             fd_trans_dup(arg1, ret);
8772         }
8773         return ret;
8774 #ifdef TARGET_NR_pipe
8775     case TARGET_NR_pipe:
8776         return do_pipe(cpu_env, arg1, 0, 0);
8777 #endif
8778 #ifdef TARGET_NR_pipe2
8779     case TARGET_NR_pipe2:
8780         return do_pipe(cpu_env, arg1,
8781                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8782 #endif
8783     case TARGET_NR_times:
8784         {
8785             struct target_tms *tmsp;
8786             struct tms tms;
8787             ret = get_errno(times(&tms));
8788             if (arg1) {
8789                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8790                 if (!tmsp)
8791                     return -TARGET_EFAULT;
8792                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8793                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8794                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8795                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8796             }
8797             if (!is_error(ret))
8798                 ret = host_to_target_clock_t(ret);
8799         }
8800         return ret;
8801     case TARGET_NR_acct:
8802         if (arg1 == 0) {
8803             ret = get_errno(acct(NULL));
8804         } else {
8805             if (!(p = lock_user_string(arg1))) {
8806                 return -TARGET_EFAULT;
8807             }
8808             ret = get_errno(acct(path(p)));
8809             unlock_user(p, arg1, 0);
8810         }
8811         return ret;
8812 #ifdef TARGET_NR_umount2
8813     case TARGET_NR_umount2:
8814         if (!(p = lock_user_string(arg1)))
8815             return -TARGET_EFAULT;
8816         ret = get_errno(umount2(p, arg2));
8817         unlock_user(p, arg1, 0);
8818         return ret;
8819 #endif
8820     case TARGET_NR_ioctl:
8821         return do_ioctl(arg1, arg2, arg3);
8822 #ifdef TARGET_NR_fcntl
8823     case TARGET_NR_fcntl:
8824         return do_fcntl(arg1, arg2, arg3);
8825 #endif
8826     case TARGET_NR_setpgid:
8827         return get_errno(setpgid(arg1, arg2));
8828     case TARGET_NR_umask:
8829         return get_errno(umask(arg1));
8830     case TARGET_NR_chroot:
8831         if (!(p = lock_user_string(arg1)))
8832             return -TARGET_EFAULT;
8833         ret = get_errno(chroot(p));
8834         unlock_user(p, arg1, 0);
8835         return ret;
8836 #ifdef TARGET_NR_dup2
8837     case TARGET_NR_dup2:
8838         ret = get_errno(dup2(arg1, arg2));
8839         if (ret >= 0) {
8840             fd_trans_dup(arg1, arg2);
8841         }
8842         return ret;
8843 #endif
8844 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8845     case TARGET_NR_dup3:
8846     {
8847         int host_flags;
8848 
8849         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8850             return -EINVAL;
8851         }
8852         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8853         ret = get_errno(dup3(arg1, arg2, host_flags));
8854         if (ret >= 0) {
8855             fd_trans_dup(arg1, arg2);
8856         }
8857         return ret;
8858     }
8859 #endif
8860 #ifdef TARGET_NR_getppid /* not on alpha */
8861     case TARGET_NR_getppid:
8862         return get_errno(getppid());
8863 #endif
8864 #ifdef TARGET_NR_getpgrp
8865     case TARGET_NR_getpgrp:
8866         return get_errno(getpgrp());
8867 #endif
8868     case TARGET_NR_setsid:
8869         return get_errno(setsid());
8870 #ifdef TARGET_NR_sigaction
8871     case TARGET_NR_sigaction:
8872         {
8873 #if defined(TARGET_MIPS)
8874             struct target_sigaction act, oact, *pact, *old_act;
8875 
8876             if (arg2) {
8877                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8878                     return -TARGET_EFAULT;
8879                 act._sa_handler = old_act->_sa_handler;
8880                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8881                 act.sa_flags = old_act->sa_flags;
8882                 unlock_user_struct(old_act, arg2, 0);
8883                 pact = &act;
8884             } else {
8885                 pact = NULL;
8886             }
8887 
8888             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8889 
8890             if (!is_error(ret) && arg3) {
8891                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8892                     return -TARGET_EFAULT;
8893                 old_act->_sa_handler = oact._sa_handler;
8894                 old_act->sa_flags = oact.sa_flags;
8895                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8896                 old_act->sa_mask.sig[1] = 0;
8897                 old_act->sa_mask.sig[2] = 0;
8898                 old_act->sa_mask.sig[3] = 0;
8899                 unlock_user_struct(old_act, arg3, 1);
8900             }
8901 #else
8902             struct target_old_sigaction *old_act;
8903             struct target_sigaction act, oact, *pact;
8904             if (arg2) {
8905                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8906                     return -TARGET_EFAULT;
8907                 act._sa_handler = old_act->_sa_handler;
8908                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8909                 act.sa_flags = old_act->sa_flags;
8910 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8911                 act.sa_restorer = old_act->sa_restorer;
8912 #endif
8913                 unlock_user_struct(old_act, arg2, 0);
8914                 pact = &act;
8915             } else {
8916                 pact = NULL;
8917             }
8918             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8919             if (!is_error(ret) && arg3) {
8920                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8921                     return -TARGET_EFAULT;
8922                 old_act->_sa_handler = oact._sa_handler;
8923                 old_act->sa_mask = oact.sa_mask.sig[0];
8924                 old_act->sa_flags = oact.sa_flags;
8925 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8926                 old_act->sa_restorer = oact.sa_restorer;
8927 #endif
8928                 unlock_user_struct(old_act, arg3, 1);
8929             }
8930 #endif
8931         }
8932         return ret;
8933 #endif
8934     case TARGET_NR_rt_sigaction:
8935         {
8936             /*
8937              * For Alpha and SPARC this is a 5 argument syscall, with
8938              * a 'restorer' parameter which must be copied into the
8939              * sa_restorer field of the sigaction struct.
8940              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8941              * and arg5 is the sigsetsize.
8942              */
8943 #if defined(TARGET_ALPHA)
8944             target_ulong sigsetsize = arg4;
8945             target_ulong restorer = arg5;
8946 #elif defined(TARGET_SPARC)
8947             target_ulong restorer = arg4;
8948             target_ulong sigsetsize = arg5;
8949 #else
8950             target_ulong sigsetsize = arg4;
8951             target_ulong restorer = 0;
8952 #endif
8953             struct target_sigaction *act = NULL;
8954             struct target_sigaction *oact = NULL;
8955 
8956             if (sigsetsize != sizeof(target_sigset_t)) {
8957                 return -TARGET_EINVAL;
8958             }
8959             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8960                 return -TARGET_EFAULT;
8961             }
8962             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8963                 ret = -TARGET_EFAULT;
8964             } else {
8965                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8966                 if (oact) {
8967                     unlock_user_struct(oact, arg3, 1);
8968                 }
8969             }
8970             if (act) {
8971                 unlock_user_struct(act, arg2, 0);
8972             }
8973         }
8974         return ret;
8975 #ifdef TARGET_NR_sgetmask /* not on alpha */
8976     case TARGET_NR_sgetmask:
8977         {
8978             sigset_t cur_set;
8979             abi_ulong target_set;
8980             ret = do_sigprocmask(0, NULL, &cur_set);
8981             if (!ret) {
8982                 host_to_target_old_sigset(&target_set, &cur_set);
8983                 ret = target_set;
8984             }
8985         }
8986         return ret;
8987 #endif
8988 #ifdef TARGET_NR_ssetmask /* not on alpha */
8989     case TARGET_NR_ssetmask:
8990         {
8991             sigset_t set, oset;
8992             abi_ulong target_set = arg1;
8993             target_to_host_old_sigset(&set, &target_set);
8994             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8995             if (!ret) {
8996                 host_to_target_old_sigset(&target_set, &oset);
8997                 ret = target_set;
8998             }
8999         }
9000         return ret;
9001 #endif
9002 #ifdef TARGET_NR_sigprocmask
9003     case TARGET_NR_sigprocmask:
9004         {
9005 #if defined(TARGET_ALPHA)
9006             sigset_t set, oldset;
9007             abi_ulong mask;
9008             int how;
9009 
9010             switch (arg1) {
9011             case TARGET_SIG_BLOCK:
9012                 how = SIG_BLOCK;
9013                 break;
9014             case TARGET_SIG_UNBLOCK:
9015                 how = SIG_UNBLOCK;
9016                 break;
9017             case TARGET_SIG_SETMASK:
9018                 how = SIG_SETMASK;
9019                 break;
9020             default:
9021                 return -TARGET_EINVAL;
9022             }
9023             mask = arg2;
9024             target_to_host_old_sigset(&set, &mask);
9025 
9026             ret = do_sigprocmask(how, &set, &oldset);
9027             if (!is_error(ret)) {
9028                 host_to_target_old_sigset(&mask, &oldset);
9029                 ret = mask;
9030                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9031             }
9032 #else
9033             sigset_t set, oldset, *set_ptr;
9034             int how;
9035 
9036             if (arg2) {
9037                 switch (arg1) {
9038                 case TARGET_SIG_BLOCK:
9039                     how = SIG_BLOCK;
9040                     break;
9041                 case TARGET_SIG_UNBLOCK:
9042                     how = SIG_UNBLOCK;
9043                     break;
9044                 case TARGET_SIG_SETMASK:
9045                     how = SIG_SETMASK;
9046                     break;
9047                 default:
9048                     return -TARGET_EINVAL;
9049                 }
9050                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9051                     return -TARGET_EFAULT;
9052                 target_to_host_old_sigset(&set, p);
9053                 unlock_user(p, arg2, 0);
9054                 set_ptr = &set;
9055             } else {
9056                 how = 0;
9057                 set_ptr = NULL;
9058             }
9059             ret = do_sigprocmask(how, set_ptr, &oldset);
9060             if (!is_error(ret) && arg3) {
9061                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9062                     return -TARGET_EFAULT;
9063                 host_to_target_old_sigset(p, &oldset);
9064                 unlock_user(p, arg3, sizeof(target_sigset_t));
9065             }
9066 #endif
9067         }
9068         return ret;
9069 #endif
9070     case TARGET_NR_rt_sigprocmask:
9071         {
9072             int how = arg1;
9073             sigset_t set, oldset, *set_ptr;
9074 
9075             if (arg4 != sizeof(target_sigset_t)) {
9076                 return -TARGET_EINVAL;
9077             }
9078 
9079             if (arg2) {
9080                 switch(how) {
9081                 case TARGET_SIG_BLOCK:
9082                     how = SIG_BLOCK;
9083                     break;
9084                 case TARGET_SIG_UNBLOCK:
9085                     how = SIG_UNBLOCK;
9086                     break;
9087                 case TARGET_SIG_SETMASK:
9088                     how = SIG_SETMASK;
9089                     break;
9090                 default:
9091                     return -TARGET_EINVAL;
9092                 }
9093                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9094                     return -TARGET_EFAULT;
9095                 target_to_host_sigset(&set, p);
9096                 unlock_user(p, arg2, 0);
9097                 set_ptr = &set;
9098             } else {
9099                 how = 0;
9100                 set_ptr = NULL;
9101             }
9102             ret = do_sigprocmask(how, set_ptr, &oldset);
9103             if (!is_error(ret) && arg3) {
9104                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9105                     return -TARGET_EFAULT;
9106                 host_to_target_sigset(p, &oldset);
9107                 unlock_user(p, arg3, sizeof(target_sigset_t));
9108             }
9109         }
9110         return ret;
9111 #ifdef TARGET_NR_sigpending
9112     case TARGET_NR_sigpending:
9113         {
9114             sigset_t set;
9115             ret = get_errno(sigpending(&set));
9116             if (!is_error(ret)) {
9117                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9118                     return -TARGET_EFAULT;
9119                 host_to_target_old_sigset(p, &set);
9120                 unlock_user(p, arg1, sizeof(target_sigset_t));
9121             }
9122         }
9123         return ret;
9124 #endif
9125     case TARGET_NR_rt_sigpending:
9126         {
9127             sigset_t set;
9128 
9129             /* Yes, this check is >, not != like most.  We follow the
9130              * kernel's logic here: it implements NR_sigpending through the
9131              * same code path, and in that case the old_sigset_t is smaller
9132              * in size.
9133              */
9134             if (arg2 > sizeof(target_sigset_t)) {
9135                 return -TARGET_EINVAL;
9136             }
9137 
9138             ret = get_errno(sigpending(&set));
9139             if (!is_error(ret)) {
9140                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9141                     return -TARGET_EFAULT;
9142                 host_to_target_sigset(p, &set);
9143                 unlock_user(p, arg1, sizeof(target_sigset_t));
9144             }
9145         }
9146         return ret;
9147 #ifdef TARGET_NR_sigsuspend
9148     case TARGET_NR_sigsuspend:
9149         {
9150             TaskState *ts = cpu->opaque;
9151 #if defined(TARGET_ALPHA)
9152             abi_ulong mask = arg1;
9153             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9154 #else
9155             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9156                 return -TARGET_EFAULT;
9157             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9158             unlock_user(p, arg1, 0);
9159 #endif
9160             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9161                                                SIGSET_T_SIZE));
9162             if (ret != -TARGET_ERESTARTSYS) {
9163                 ts->in_sigsuspend = 1;
9164             }
9165         }
9166         return ret;
9167 #endif
9168     case TARGET_NR_rt_sigsuspend:
9169         {
9170             TaskState *ts = cpu->opaque;
9171 
9172             if (arg2 != sizeof(target_sigset_t)) {
9173                 return -TARGET_EINVAL;
9174             }
9175             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9176                 return -TARGET_EFAULT;
9177             target_to_host_sigset(&ts->sigsuspend_mask, p);
9178             unlock_user(p, arg1, 0);
9179             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9180                                                SIGSET_T_SIZE));
9181             if (ret != -TARGET_ERESTARTSYS) {
9182                 ts->in_sigsuspend = 1;
9183             }
9184         }
9185         return ret;
9186 #ifdef TARGET_NR_rt_sigtimedwait
9187     case TARGET_NR_rt_sigtimedwait:
9188         {
9189             sigset_t set;
9190             struct timespec uts, *puts;
9191             siginfo_t uinfo;
9192 
9193             if (arg4 != sizeof(target_sigset_t)) {
9194                 return -TARGET_EINVAL;
9195             }
9196 
9197             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9198                 return -TARGET_EFAULT;
9199             target_to_host_sigset(&set, p);
9200             unlock_user(p, arg1, 0);
9201             if (arg3) {
9202                 puts = &uts;
9203                 if (target_to_host_timespec(puts, arg3)) {
9204                     return -TARGET_EFAULT;
9205                 }
9206             } else {
9207                 puts = NULL;
9208             }
9209             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9210                                                  SIGSET_T_SIZE));
9211             if (!is_error(ret)) {
9212                 if (arg2) {
9213                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9214                                   0);
9215                     if (!p) {
9216                         return -TARGET_EFAULT;
9217                     }
9218                     host_to_target_siginfo(p, &uinfo);
9219                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9220                 }
9221                 ret = host_to_target_signal(ret);
9222             }
9223         }
9224         return ret;
9225 #endif
9226 #ifdef TARGET_NR_rt_sigtimedwait_time64
9227     case TARGET_NR_rt_sigtimedwait_time64:
9228         {
9229             sigset_t set;
9230             struct timespec uts, *puts;
9231             siginfo_t uinfo;
9232 
9233             if (arg4 != sizeof(target_sigset_t)) {
9234                 return -TARGET_EINVAL;
9235             }
9236 
9237             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9238             if (!p) {
9239                 return -TARGET_EFAULT;
9240             }
9241             target_to_host_sigset(&set, p);
9242             unlock_user(p, arg1, 0);
9243             if (arg3) {
9244                 puts = &uts;
9245                 if (target_to_host_timespec64(puts, arg3)) {
9246                     return -TARGET_EFAULT;
9247                 }
9248             } else {
9249                 puts = NULL;
9250             }
9251             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9252                                                  SIGSET_T_SIZE));
9253             if (!is_error(ret)) {
9254                 if (arg2) {
9255                     p = lock_user(VERIFY_WRITE, arg2,
9256                                   sizeof(target_siginfo_t), 0);
9257                     if (!p) {
9258                         return -TARGET_EFAULT;
9259                     }
9260                     host_to_target_siginfo(p, &uinfo);
9261                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9262                 }
9263                 ret = host_to_target_signal(ret);
9264             }
9265         }
9266         return ret;
9267 #endif
9268     case TARGET_NR_rt_sigqueueinfo:
9269         {
9270             siginfo_t uinfo;
9271 
9272             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9273             if (!p) {
9274                 return -TARGET_EFAULT;
9275             }
9276             target_to_host_siginfo(&uinfo, p);
9277             unlock_user(p, arg3, 0);
9278             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9279         }
9280         return ret;
9281     case TARGET_NR_rt_tgsigqueueinfo:
9282         {
9283             siginfo_t uinfo;
9284 
9285             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9286             if (!p) {
9287                 return -TARGET_EFAULT;
9288             }
9289             target_to_host_siginfo(&uinfo, p);
9290             unlock_user(p, arg4, 0);
9291             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9292         }
9293         return ret;
9294 #ifdef TARGET_NR_sigreturn
9295     case TARGET_NR_sigreturn:
9296         if (block_signals()) {
9297             return -TARGET_ERESTARTSYS;
9298         }
9299         return do_sigreturn(cpu_env);
9300 #endif
9301     case TARGET_NR_rt_sigreturn:
9302         if (block_signals()) {
9303             return -TARGET_ERESTARTSYS;
9304         }
9305         return do_rt_sigreturn(cpu_env);
9306     case TARGET_NR_sethostname:
9307         if (!(p = lock_user_string(arg1)))
9308             return -TARGET_EFAULT;
9309         ret = get_errno(sethostname(p, arg2));
9310         unlock_user(p, arg1, 0);
9311         return ret;
9312 #ifdef TARGET_NR_setrlimit
9313     case TARGET_NR_setrlimit:
9314         {
9315             int resource = target_to_host_resource(arg1);
9316             struct target_rlimit *target_rlim;
9317             struct rlimit rlim;
9318             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9319                 return -TARGET_EFAULT;
9320             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9321             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9322             unlock_user_struct(target_rlim, arg2, 0);
9323             /*
9324              * If we just passed through resource limit settings for memory then
9325              * they would also apply to QEMU's own allocations, and QEMU will
9326              * crash or hang or die if its allocations fail. Ideally we would
9327              * track the guest allocations in QEMU and apply the limits ourselves.
9328              * For now, just tell the guest the call succeeded but don't actually
9329              * limit anything.
9330              */
9331             if (resource != RLIMIT_AS &&
9332                 resource != RLIMIT_DATA &&
9333                 resource != RLIMIT_STACK) {
9334                 return get_errno(setrlimit(resource, &rlim));
9335             } else {
9336                 return 0;
9337             }
9338         }
9339 #endif
9340 #ifdef TARGET_NR_getrlimit
9341     case TARGET_NR_getrlimit:
9342         {
9343             int resource = target_to_host_resource(arg1);
9344             struct target_rlimit *target_rlim;
9345             struct rlimit rlim;
9346 
9347             ret = get_errno(getrlimit(resource, &rlim));
9348             if (!is_error(ret)) {
9349                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9350                     return -TARGET_EFAULT;
9351                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9352                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9353                 unlock_user_struct(target_rlim, arg2, 1);
9354             }
9355         }
9356         return ret;
9357 #endif
9358     case TARGET_NR_getrusage:
9359         {
9360             struct rusage rusage;
9361             ret = get_errno(getrusage(arg1, &rusage));
9362             if (!is_error(ret)) {
9363                 ret = host_to_target_rusage(arg2, &rusage);
9364             }
9365         }
9366         return ret;
9367 #if defined(TARGET_NR_gettimeofday)
9368     case TARGET_NR_gettimeofday:
9369         {
9370             struct timeval tv;
9371             struct timezone tz;
9372 
9373             ret = get_errno(gettimeofday(&tv, &tz));
9374             if (!is_error(ret)) {
9375                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9376                     return -TARGET_EFAULT;
9377                 }
9378                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9379                     return -TARGET_EFAULT;
9380                 }
9381             }
9382         }
9383         return ret;
9384 #endif
9385 #if defined(TARGET_NR_settimeofday)
9386     case TARGET_NR_settimeofday:
9387         {
9388             struct timeval tv, *ptv = NULL;
9389             struct timezone tz, *ptz = NULL;
9390 
9391             if (arg1) {
9392                 if (copy_from_user_timeval(&tv, arg1)) {
9393                     return -TARGET_EFAULT;
9394                 }
9395                 ptv = &tv;
9396             }
9397 
9398             if (arg2) {
9399                 if (copy_from_user_timezone(&tz, arg2)) {
9400                     return -TARGET_EFAULT;
9401                 }
9402                 ptz = &tz;
9403             }
9404 
9405             return get_errno(settimeofday(ptv, ptz));
9406         }
9407 #endif
9408 #if defined(TARGET_NR_select)
9409     case TARGET_NR_select:
9410 #if defined(TARGET_WANT_NI_OLD_SELECT)
9411         /* Some architectures used to have old_select here,
9412          * but now return -ENOSYS for it.
9413          */
9414         ret = -TARGET_ENOSYS;
9415 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9416         ret = do_old_select(arg1);
9417 #else
9418         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9419 #endif
9420         return ret;
9421 #endif
9422 #ifdef TARGET_NR_pselect6
9423     case TARGET_NR_pselect6:
9424         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9425 #endif
9426 #ifdef TARGET_NR_pselect6_time64
9427     case TARGET_NR_pselect6_time64:
9428         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9429 #endif
9430 #ifdef TARGET_NR_symlink
9431     case TARGET_NR_symlink:
9432         {
9433             void *p2;
9434             p = lock_user_string(arg1);
9435             p2 = lock_user_string(arg2);
9436             if (!p || !p2)
9437                 ret = -TARGET_EFAULT;
9438             else
9439                 ret = get_errno(symlink(p, p2));
9440             unlock_user(p2, arg2, 0);
9441             unlock_user(p, arg1, 0);
9442         }
9443         return ret;
9444 #endif
9445 #if defined(TARGET_NR_symlinkat)
9446     case TARGET_NR_symlinkat:
9447         {
9448             void *p2;
9449             p  = lock_user_string(arg1);
9450             p2 = lock_user_string(arg3);
9451             if (!p || !p2)
9452                 ret = -TARGET_EFAULT;
9453             else
9454                 ret = get_errno(symlinkat(p, arg2, p2));
9455             unlock_user(p2, arg3, 0);
9456             unlock_user(p, arg1, 0);
9457         }
9458         return ret;
9459 #endif
9460 #ifdef TARGET_NR_readlink
9461     case TARGET_NR_readlink:
9462         {
9463             void *p2;
9464             p = lock_user_string(arg1);
9465             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9466             if (!p || !p2) {
9467                 ret = -TARGET_EFAULT;
9468             } else if (!arg3) {
9469                 /* Short circuit this for the magic exe check. */
9470                 ret = -TARGET_EINVAL;
9471             } else if (is_proc_myself((const char *)p, "exe")) {
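                /*
                 * A guest readlink of /proc/self/exe must report the guest
                 * binary, not the QEMU host binary, so resolve exec_path
                 * here instead of forwarding the call.
                 */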
9472                 char real[PATH_MAX], *temp;
9473                 temp = realpath(exec_path, real);
9474                 /* Return value is # of bytes that we wrote to the buffer. */
9475                 if (temp == NULL) {
9476                     ret = get_errno(-1);
9477                 } else {
9478                     /* Don't worry about sign mismatch as earlier mapping
9479                      * logic would have thrown a bad address error. */
9480                     ret = MIN(strlen(real), arg3);
9481                     /* We cannot NUL terminate the string. */
9482                     memcpy(p2, real, ret);
9483                 }
9484             } else {
9485                 ret = get_errno(readlink(path(p), p2, arg3));
9486             }
9487             unlock_user(p2, arg2, ret);
9488             unlock_user(p, arg1, 0);
9489         }
9490         return ret;
9491 #endif
9492 #if defined(TARGET_NR_readlinkat)
9493     case TARGET_NR_readlinkat:
9494         {
9495             void *p2;
9496             p  = lock_user_string(arg2);
9497             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9498             if (!p || !p2) {
9499                 ret = -TARGET_EFAULT;
9500             } else if (is_proc_myself((const char *)p, "exe")) {
9501                 char real[PATH_MAX], *temp;
9502                 temp = realpath(exec_path, real);
9503             ret = temp == NULL ? get_errno(-1) : strlen(real);
9504                 snprintf((char *)p2, arg4, "%s", real);
9505             } else {
9506                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9507             }
9508             unlock_user(p2, arg3, ret);
9509             unlock_user(p, arg2, 0);
9510         }
9511         return ret;
9512 #endif
9513 #ifdef TARGET_NR_swapon
9514     case TARGET_NR_swapon:
9515         if (!(p = lock_user_string(arg1)))
9516             return -TARGET_EFAULT;
9517         ret = get_errno(swapon(p, arg2));
9518         unlock_user(p, arg1, 0);
9519         return ret;
9520 #endif
9521     case TARGET_NR_reboot:
9522         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9523            /* arg4 (the RESTART2 command string) must be ignored in all other cases */
9524            p = lock_user_string(arg4);
9525            if (!p) {
9526                return -TARGET_EFAULT;
9527            }
9528            ret = get_errno(reboot(arg1, arg2, arg3, p));
9529            unlock_user(p, arg4, 0);
9530         } else {
9531            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9532         }
9533         return ret;
9534 #ifdef TARGET_NR_mmap
9535     case TARGET_NR_mmap:
9536 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9537     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9538     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9539     || defined(TARGET_S390X)
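        /*
         * On these targets the old mmap ABI passes a single guest pointer
         * to a block of six abi_ulong values, so unpack that block first.
         */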
9540         {
9541             abi_ulong *v;
9542             abi_ulong v1, v2, v3, v4, v5, v6;
9543             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9544                 return -TARGET_EFAULT;
9545             v1 = tswapal(v[0]);
9546             v2 = tswapal(v[1]);
9547             v3 = tswapal(v[2]);
9548             v4 = tswapal(v[3]);
9549             v5 = tswapal(v[4]);
9550             v6 = tswapal(v[5]);
9551             unlock_user(v, arg1, 0);
9552             ret = get_errno(target_mmap(v1, v2, v3,
9553                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9554                                         v5, v6));
9555         }
9556 #else
9557         /* mmap pointers are always untagged */
9558         ret = get_errno(target_mmap(arg1, arg2, arg3,
9559                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9560                                     arg5,
9561                                     arg6));
9562 #endif
9563         return ret;
9564 #endif
9565 #ifdef TARGET_NR_mmap2
9566     case TARGET_NR_mmap2:
9567 #ifndef MMAP_SHIFT
9568 #define MMAP_SHIFT 12
9569 #endif
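        /*
         * mmap2 passes the file offset in units of 2^MMAP_SHIFT
         * (normally 4096-byte) pages rather than in bytes.
         */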
9570         ret = target_mmap(arg1, arg2, arg3,
9571                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9572                           arg5, arg6 << MMAP_SHIFT);
9573         return get_errno(ret);
9574 #endif
9575     case TARGET_NR_munmap:
9576         arg1 = cpu_untagged_addr(cpu, arg1);
9577         return get_errno(target_munmap(arg1, arg2));
9578     case TARGET_NR_mprotect:
9579         arg1 = cpu_untagged_addr(cpu, arg1);
9580         {
9581             TaskState *ts = cpu->opaque;
9582             /* Special hack to detect libc making the stack executable.  */
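            /*
             * PROT_GROWSDOWN is not forwarded to the host; instead the
             * requested range is extended down to the guest's stack limit
             * before the host mprotect call.
             */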
9583             if ((arg3 & PROT_GROWSDOWN)
9584                 && arg1 >= ts->info->stack_limit
9585                 && arg1 <= ts->info->start_stack) {
9586                 arg3 &= ~PROT_GROWSDOWN;
9587                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9588                 arg1 = ts->info->stack_limit;
9589             }
9590         }
9591         return get_errno(target_mprotect(arg1, arg2, arg3));
9592 #ifdef TARGET_NR_mremap
9593     case TARGET_NR_mremap:
9594         arg1 = cpu_untagged_addr(cpu, arg1);
9595         /* mremap new_addr (arg5) is always untagged */
9596         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9597 #endif
9598         /* ??? msync/mlock/munlock are broken for softmmu.  */
9599 #ifdef TARGET_NR_msync
9600     case TARGET_NR_msync:
9601         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9602 #endif
9603 #ifdef TARGET_NR_mlock
9604     case TARGET_NR_mlock:
9605         return get_errno(mlock(g2h(cpu, arg1), arg2));
9606 #endif
9607 #ifdef TARGET_NR_munlock
9608     case TARGET_NR_munlock:
9609         return get_errno(munlock(g2h(cpu, arg1), arg2));
9610 #endif
9611 #ifdef TARGET_NR_mlockall
9612     case TARGET_NR_mlockall:
9613         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9614 #endif
9615 #ifdef TARGET_NR_munlockall
9616     case TARGET_NR_munlockall:
9617         return get_errno(munlockall());
9618 #endif
9619 #ifdef TARGET_NR_truncate
9620     case TARGET_NR_truncate:
9621         if (!(p = lock_user_string(arg1)))
9622             return -TARGET_EFAULT;
9623         ret = get_errno(truncate(p, arg2));
9624         unlock_user(p, arg1, 0);
9625         return ret;
9626 #endif
9627 #ifdef TARGET_NR_ftruncate
9628     case TARGET_NR_ftruncate:
9629         return get_errno(ftruncate(arg1, arg2));
9630 #endif
9631     case TARGET_NR_fchmod:
9632         return get_errno(fchmod(arg1, arg2));
9633 #if defined(TARGET_NR_fchmodat)
9634     case TARGET_NR_fchmodat:
9635         if (!(p = lock_user_string(arg2)))
9636             return -TARGET_EFAULT;
9637         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9638         unlock_user(p, arg2, 0);
9639         return ret;
9640 #endif
9641     case TARGET_NR_getpriority:
9642         /* Note that negative values are valid for getpriority, so we must
9643            differentiate based on errno settings.  */
9644         errno = 0;
9645         ret = getpriority(arg1, arg2);
9646         if (ret == -1 && errno != 0) {
9647             return -host_to_target_errno(errno);
9648         }
9649 #ifdef TARGET_ALPHA
9650         /* Return value is the unbiased priority.  Signal no error.  */
9651         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9652 #else
9653         /* Return value is a biased priority to avoid negative numbers.  */
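        /* e.g. a host nice value of -5 is returned to the guest as 20 - (-5) = 25. */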
9654         ret = 20 - ret;
9655 #endif
9656         return ret;
9657     case TARGET_NR_setpriority:
9658         return get_errno(setpriority(arg1, arg2, arg3));
9659 #ifdef TARGET_NR_statfs
9660     case TARGET_NR_statfs:
9661         if (!(p = lock_user_string(arg1))) {
9662             return -TARGET_EFAULT;
9663         }
9664         ret = get_errno(statfs(path(p), &stfs));
9665         unlock_user(p, arg1, 0);
9666     convert_statfs:
9667         if (!is_error(ret)) {
9668             struct target_statfs *target_stfs;
9669 
9670             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9671                 return -TARGET_EFAULT;
9672             __put_user(stfs.f_type, &target_stfs->f_type);
9673             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9674             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9675             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9676             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9677             __put_user(stfs.f_files, &target_stfs->f_files);
9678             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9679             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9680             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9681             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9682             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9683 #ifdef _STATFS_F_FLAGS
9684             __put_user(stfs.f_flags, &target_stfs->f_flags);
9685 #else
9686             __put_user(0, &target_stfs->f_flags);
9687 #endif
9688             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9689             unlock_user_struct(target_stfs, arg2, 1);
9690         }
9691         return ret;
9692 #endif
9693 #ifdef TARGET_NR_fstatfs
9694     case TARGET_NR_fstatfs:
9695         ret = get_errno(fstatfs(arg1, &stfs));
9696         goto convert_statfs;
9697 #endif
9698 #ifdef TARGET_NR_statfs64
9699     case TARGET_NR_statfs64:
9700         if (!(p = lock_user_string(arg1))) {
9701             return -TARGET_EFAULT;
9702         }
9703         ret = get_errno(statfs(path(p), &stfs));
9704         unlock_user(p, arg1, 0);
9705     convert_statfs64:
9706         if (!is_error(ret)) {
9707             struct target_statfs64 *target_stfs;
9708 
9709             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9710                 return -TARGET_EFAULT;
9711             __put_user(stfs.f_type, &target_stfs->f_type);
9712             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9713             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9714             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9715             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9716             __put_user(stfs.f_files, &target_stfs->f_files);
9717             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9718             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9719             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9720             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9721             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9722 #ifdef _STATFS_F_FLAGS
9723             __put_user(stfs.f_flags, &target_stfs->f_flags);
9724 #else
9725             __put_user(0, &target_stfs->f_flags);
9726 #endif
9727             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9728             unlock_user_struct(target_stfs, arg3, 1);
9729         }
9730         return ret;
9731     case TARGET_NR_fstatfs64:
9732         ret = get_errno(fstatfs(arg1, &stfs));
9733         goto convert_statfs64;
9734 #endif
9735 #ifdef TARGET_NR_socketcall
9736     case TARGET_NR_socketcall:
9737         return do_socketcall(arg1, arg2);
9738 #endif
9739 #ifdef TARGET_NR_accept
9740     case TARGET_NR_accept:
9741         return do_accept4(arg1, arg2, arg3, 0);
9742 #endif
9743 #ifdef TARGET_NR_accept4
9744     case TARGET_NR_accept4:
9745         return do_accept4(arg1, arg2, arg3, arg4);
9746 #endif
9747 #ifdef TARGET_NR_bind
9748     case TARGET_NR_bind:
9749         return do_bind(arg1, arg2, arg3);
9750 #endif
9751 #ifdef TARGET_NR_connect
9752     case TARGET_NR_connect:
9753         return do_connect(arg1, arg2, arg3);
9754 #endif
9755 #ifdef TARGET_NR_getpeername
9756     case TARGET_NR_getpeername:
9757         return do_getpeername(arg1, arg2, arg3);
9758 #endif
9759 #ifdef TARGET_NR_getsockname
9760     case TARGET_NR_getsockname:
9761         return do_getsockname(arg1, arg2, arg3);
9762 #endif
9763 #ifdef TARGET_NR_getsockopt
9764     case TARGET_NR_getsockopt:
9765         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9766 #endif
9767 #ifdef TARGET_NR_listen
9768     case TARGET_NR_listen:
9769         return get_errno(listen(arg1, arg2));
9770 #endif
9771 #ifdef TARGET_NR_recv
9772     case TARGET_NR_recv:
9773         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9774 #endif
9775 #ifdef TARGET_NR_recvfrom
9776     case TARGET_NR_recvfrom:
9777         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9778 #endif
9779 #ifdef TARGET_NR_recvmsg
9780     case TARGET_NR_recvmsg:
9781         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9782 #endif
9783 #ifdef TARGET_NR_send
9784     case TARGET_NR_send:
9785         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9786 #endif
9787 #ifdef TARGET_NR_sendmsg
9788     case TARGET_NR_sendmsg:
9789         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9790 #endif
9791 #ifdef TARGET_NR_sendmmsg
9792     case TARGET_NR_sendmmsg:
9793         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9794 #endif
9795 #ifdef TARGET_NR_recvmmsg
9796     case TARGET_NR_recvmmsg:
9797         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9798 #endif
9799 #ifdef TARGET_NR_sendto
9800     case TARGET_NR_sendto:
9801         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9802 #endif
9803 #ifdef TARGET_NR_shutdown
9804     case TARGET_NR_shutdown:
9805         return get_errno(shutdown(arg1, arg2));
9806 #endif
9807 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9808     case TARGET_NR_getrandom:
9809         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9810         if (!p) {
9811             return -TARGET_EFAULT;
9812         }
9813         ret = get_errno(getrandom(p, arg2, arg3));
9814         unlock_user(p, arg1, ret);
9815         return ret;
9816 #endif
9817 #ifdef TARGET_NR_socket
9818     case TARGET_NR_socket:
9819         return do_socket(arg1, arg2, arg3);
9820 #endif
9821 #ifdef TARGET_NR_socketpair
9822     case TARGET_NR_socketpair:
9823         return do_socketpair(arg1, arg2, arg3, arg4);
9824 #endif
9825 #ifdef TARGET_NR_setsockopt
9826     case TARGET_NR_setsockopt:
9827         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9828 #endif
9829 #if defined(TARGET_NR_syslog)
9830     case TARGET_NR_syslog:
9831         {
9832             int len = arg3;
9833 
9834             switch (arg1) {
9835             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9836             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9837             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9838             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9839             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9840             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9841             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9842             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9843                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9844             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9845             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9846             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9847                 {
9848                     if (len < 0) {
9849                         return -TARGET_EINVAL;
9850                     }
9851                     if (len == 0) {
9852                         return 0;
9853                     }
9854                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9855                     if (!p) {
9856                         return -TARGET_EFAULT;
9857                     }
9858                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9859                     unlock_user(p, arg2, arg3);
9860                 }
9861                 return ret;
9862             default:
9863                 return -TARGET_EINVAL;
9864             }
9865         }
9866         break;
9867 #endif
9868     case TARGET_NR_setitimer:
9869         {
9870             struct itimerval value, ovalue, *pvalue;
9871 
9872             if (arg2) {
9873                 pvalue = &value;
9874                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9875                     || copy_from_user_timeval(&pvalue->it_value,
9876                                               arg2 + sizeof(struct target_timeval)))
9877                     return -TARGET_EFAULT;
9878             } else {
9879                 pvalue = NULL;
9880             }
9881             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9882             if (!is_error(ret) && arg3) {
9883                 if (copy_to_user_timeval(arg3,
9884                                          &ovalue.it_interval)
9885                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9886                                             &ovalue.it_value))
9887                     return -TARGET_EFAULT;
9888             }
9889         }
9890         return ret;
9891     case TARGET_NR_getitimer:
9892         {
9893             struct itimerval value;
9894 
9895             ret = get_errno(getitimer(arg1, &value));
9896             if (!is_error(ret) && arg2) {
9897                 if (copy_to_user_timeval(arg2,
9898                                          &value.it_interval)
9899                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9900                                             &value.it_value))
9901                     return -TARGET_EFAULT;
9902             }
9903         }
9904         return ret;
9905 #ifdef TARGET_NR_stat
9906     case TARGET_NR_stat:
9907         if (!(p = lock_user_string(arg1))) {
9908             return -TARGET_EFAULT;
9909         }
9910         ret = get_errno(stat(path(p), &st));
9911         unlock_user(p, arg1, 0);
9912         goto do_stat;
9913 #endif
9914 #ifdef TARGET_NR_lstat
9915     case TARGET_NR_lstat:
9916         if (!(p = lock_user_string(arg1))) {
9917             return -TARGET_EFAULT;
9918         }
9919         ret = get_errno(lstat(path(p), &st));
9920         unlock_user(p, arg1, 0);
9921         goto do_stat;
9922 #endif
9923 #ifdef TARGET_NR_fstat
9924     case TARGET_NR_fstat:
9925         {
9926             ret = get_errno(fstat(arg1, &st));
9927 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9928         do_stat:
9929 #endif
9930             if (!is_error(ret)) {
9931                 struct target_stat *target_st;
9932 
9933                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9934                     return -TARGET_EFAULT;
9935                 memset(target_st, 0, sizeof(*target_st));
9936                 __put_user(st.st_dev, &target_st->st_dev);
9937                 __put_user(st.st_ino, &target_st->st_ino);
9938                 __put_user(st.st_mode, &target_st->st_mode);
9939                 __put_user(st.st_uid, &target_st->st_uid);
9940                 __put_user(st.st_gid, &target_st->st_gid);
9941                 __put_user(st.st_nlink, &target_st->st_nlink);
9942                 __put_user(st.st_rdev, &target_st->st_rdev);
9943                 __put_user(st.st_size, &target_st->st_size);
9944                 __put_user(st.st_blksize, &target_st->st_blksize);
9945                 __put_user(st.st_blocks, &target_st->st_blocks);
9946                 __put_user(st.st_atime, &target_st->target_st_atime);
9947                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9948                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9949 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9950                 __put_user(st.st_atim.tv_nsec,
9951                            &target_st->target_st_atime_nsec);
9952                 __put_user(st.st_mtim.tv_nsec,
9953                            &target_st->target_st_mtime_nsec);
9954                 __put_user(st.st_ctim.tv_nsec,
9955                            &target_st->target_st_ctime_nsec);
9956 #endif
9957                 unlock_user_struct(target_st, arg2, 1);
9958             }
9959         }
9960         return ret;
9961 #endif
9962     case TARGET_NR_vhangup:
9963         return get_errno(vhangup());
9964 #ifdef TARGET_NR_syscall
9965     case TARGET_NR_syscall:
9966         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9967                           arg6, arg7, arg8, 0);
9968 #endif
9969 #if defined(TARGET_NR_wait4)
9970     case TARGET_NR_wait4:
9971         {
9972             int status;
9973             abi_long status_ptr = arg2;
9974             struct rusage rusage, *rusage_ptr;
9975             abi_ulong target_rusage = arg4;
9976             abi_long rusage_err;
9977             if (target_rusage)
9978                 rusage_ptr = &rusage;
9979             else
9980                 rusage_ptr = NULL;
9981             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9982             if (!is_error(ret)) {
9983                 if (status_ptr && ret) {
9984                     status = host_to_target_waitstatus(status);
9985                     if (put_user_s32(status, status_ptr))
9986                         return -TARGET_EFAULT;
9987                 }
9988                 if (target_rusage) {
9989                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9990                     if (rusage_err) {
9991                         ret = rusage_err;
9992                     }
9993                 }
9994             }
9995         }
9996         return ret;
9997 #endif
9998 #ifdef TARGET_NR_swapoff
9999     case TARGET_NR_swapoff:
10000         if (!(p = lock_user_string(arg1)))
10001             return -TARGET_EFAULT;
10002         ret = get_errno(swapoff(p));
10003         unlock_user(p, arg1, 0);
10004         return ret;
10005 #endif
10006     case TARGET_NR_sysinfo:
10007         {
10008             struct target_sysinfo *target_value;
10009             struct sysinfo value;
10010             ret = get_errno(sysinfo(&value));
10011             if (!is_error(ret) && arg1)
10012             {
10013                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10014                     return -TARGET_EFAULT;
10015                 __put_user(value.uptime, &target_value->uptime);
10016                 __put_user(value.loads[0], &target_value->loads[0]);
10017                 __put_user(value.loads[1], &target_value->loads[1]);
10018                 __put_user(value.loads[2], &target_value->loads[2]);
10019                 __put_user(value.totalram, &target_value->totalram);
10020                 __put_user(value.freeram, &target_value->freeram);
10021                 __put_user(value.sharedram, &target_value->sharedram);
10022                 __put_user(value.bufferram, &target_value->bufferram);
10023                 __put_user(value.totalswap, &target_value->totalswap);
10024                 __put_user(value.freeswap, &target_value->freeswap);
10025                 __put_user(value.procs, &target_value->procs);
10026                 __put_user(value.totalhigh, &target_value->totalhigh);
10027                 __put_user(value.freehigh, &target_value->freehigh);
10028                 __put_user(value.mem_unit, &target_value->mem_unit);
10029                 unlock_user_struct(target_value, arg1, 1);
10030             }
10031         }
10032         return ret;
10033 #ifdef TARGET_NR_ipc
10034     case TARGET_NR_ipc:
10035         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10036 #endif
10037 #ifdef TARGET_NR_semget
10038     case TARGET_NR_semget:
10039         return get_errno(semget(arg1, arg2, arg3));
10040 #endif
10041 #ifdef TARGET_NR_semop
10042     case TARGET_NR_semop:
10043         return do_semtimedop(arg1, arg2, arg3, 0, false);
10044 #endif
10045 #ifdef TARGET_NR_semtimedop
10046     case TARGET_NR_semtimedop:
10047         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10048 #endif
10049 #ifdef TARGET_NR_semtimedop_time64
10050     case TARGET_NR_semtimedop_time64:
10051         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10052 #endif
10053 #ifdef TARGET_NR_semctl
10054     case TARGET_NR_semctl:
10055         return do_semctl(arg1, arg2, arg3, arg4);
10056 #endif
10057 #ifdef TARGET_NR_msgctl
10058     case TARGET_NR_msgctl:
10059         return do_msgctl(arg1, arg2, arg3);
10060 #endif
10061 #ifdef TARGET_NR_msgget
10062     case TARGET_NR_msgget:
10063         return get_errno(msgget(arg1, arg2));
10064 #endif
10065 #ifdef TARGET_NR_msgrcv
10066     case TARGET_NR_msgrcv:
10067         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10068 #endif
10069 #ifdef TARGET_NR_msgsnd
10070     case TARGET_NR_msgsnd:
10071         return do_msgsnd(arg1, arg2, arg3, arg4);
10072 #endif
10073 #ifdef TARGET_NR_shmget
10074     case TARGET_NR_shmget:
10075         return get_errno(shmget(arg1, arg2, arg3));
10076 #endif
10077 #ifdef TARGET_NR_shmctl
10078     case TARGET_NR_shmctl:
10079         return do_shmctl(arg1, arg2, arg3);
10080 #endif
10081 #ifdef TARGET_NR_shmat
10082     case TARGET_NR_shmat:
10083         return do_shmat(cpu_env, arg1, arg2, arg3);
10084 #endif
10085 #ifdef TARGET_NR_shmdt
10086     case TARGET_NR_shmdt:
10087         return do_shmdt(arg1);
10088 #endif
10089     case TARGET_NR_fsync:
10090         return get_errno(fsync(arg1));
10091     case TARGET_NR_clone:
10092         /* Linux manages to have three different orderings for its
10093          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10094          * match the kernel's CONFIG_CLONE_* settings.
10095          * Microblaze is further special in that it uses a sixth
10096          * implicit argument to clone for the TLS pointer.
10097          */
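        /*
         * In do_fork() terms the arguments are (flags, newsp, parent_tidptr,
         * newtls, child_tidptr); each branch below reorders the guest
         * arguments into that order for its clone() convention.
         */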
10098 #if defined(TARGET_MICROBLAZE)
10099         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10100 #elif defined(TARGET_CLONE_BACKWARDS)
10101         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10102 #elif defined(TARGET_CLONE_BACKWARDS2)
10103         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10104 #else
10105         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10106 #endif
10107         return ret;
10108 #ifdef __NR_exit_group
10109         /* new thread calls */
10110     case TARGET_NR_exit_group:
10111         preexit_cleanup(cpu_env, arg1);
10112         return get_errno(exit_group(arg1));
10113 #endif
10114     case TARGET_NR_setdomainname:
10115         if (!(p = lock_user_string(arg1)))
10116             return -TARGET_EFAULT;
10117         ret = get_errno(setdomainname(p, arg2));
10118         unlock_user(p, arg1, 0);
10119         return ret;
10120     case TARGET_NR_uname:
10121         /* no need to transcode because we use the linux syscall */
10122         {
10123             struct new_utsname * buf;
10124 
10125             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10126                 return -TARGET_EFAULT;
10127             ret = get_errno(sys_uname(buf));
10128             if (!is_error(ret)) {
10129                 /* Overwrite the native machine name with whatever is being
10130                    emulated. */
10131                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10132                           sizeof(buf->machine));
10133                 /* Allow the user to override the reported release.  */
10134                 if (qemu_uname_release && *qemu_uname_release) {
10135                     g_strlcpy(buf->release, qemu_uname_release,
10136                               sizeof(buf->release));
10137                 }
10138             }
10139             unlock_user_struct(buf, arg1, 1);
10140         }
10141         return ret;
10142 #ifdef TARGET_I386
10143     case TARGET_NR_modify_ldt:
10144         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10145 #if !defined(TARGET_X86_64)
10146     case TARGET_NR_vm86:
10147         return do_vm86(cpu_env, arg1, arg2);
10148 #endif
10149 #endif
10150 #if defined(TARGET_NR_adjtimex)
10151     case TARGET_NR_adjtimex:
10152         {
10153             struct timex host_buf;
10154 
10155             if (target_to_host_timex(&host_buf, arg1) != 0) {
10156                 return -TARGET_EFAULT;
10157             }
10158             ret = get_errno(adjtimex(&host_buf));
10159             if (!is_error(ret)) {
10160                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10161                     return -TARGET_EFAULT;
10162                 }
10163             }
10164         }
10165         return ret;
10166 #endif
10167 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10168     case TARGET_NR_clock_adjtime:
10169         {
10170             struct timex htx, *phtx = &htx;
10171 
10172             if (target_to_host_timex(phtx, arg2) != 0) {
10173                 return -TARGET_EFAULT;
10174             }
10175             ret = get_errno(clock_adjtime(arg1, phtx));
10176             if (!is_error(ret) && phtx) {
10177                 if (host_to_target_timex(arg2, phtx) != 0) {
10178                     return -TARGET_EFAULT;
10179                 }
10180             }
10181         }
10182         return ret;
10183 #endif
10184 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10185     case TARGET_NR_clock_adjtime64:
10186         {
10187             struct timex htx;
10188 
10189             if (target_to_host_timex64(&htx, arg2) != 0) {
10190                 return -TARGET_EFAULT;
10191             }
10192             ret = get_errno(clock_adjtime(arg1, &htx));
10193             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10194                 return -TARGET_EFAULT;
10195             }
10196         }
10197         return ret;
10198 #endif
10199     case TARGET_NR_getpgid:
10200         return get_errno(getpgid(arg1));
10201     case TARGET_NR_fchdir:
10202         return get_errno(fchdir(arg1));
10203     case TARGET_NR_personality:
10204         return get_errno(personality(arg1));
10205 #ifdef TARGET_NR__llseek /* Not on alpha */
10206     case TARGET_NR__llseek:
10207         {
10208             int64_t res;
10209 #if !defined(__NR_llseek)
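            /*
             * Hosts without an llseek syscall (64-bit hosts) can perform
             * the whole operation with one lseek on the combined offset.
             */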
10210             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10211             if (res == -1) {
10212                 ret = get_errno(res);
10213             } else {
10214                 ret = 0;
10215             }
10216 #else
10217             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10218 #endif
10219             if ((ret == 0) && put_user_s64(res, arg4)) {
10220                 return -TARGET_EFAULT;
10221             }
10222         }
10223         return ret;
10224 #endif
10225 #ifdef TARGET_NR_getdents
10226     case TARGET_NR_getdents:
10227 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10228 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
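        /*
         * 32-bit guest on a 64-bit host: host linux_dirent records use
         * 64-bit fields, so read into a bounce buffer and repack each
         * record into the guest's (smaller) target_dirent layout.
         */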
10229         {
10230             struct target_dirent *target_dirp;
10231             struct linux_dirent *dirp;
10232             abi_long count = arg3;
10233 
10234             dirp = g_try_malloc(count);
10235             if (!dirp) {
10236                 return -TARGET_ENOMEM;
10237             }
10238 
10239             ret = get_errno(sys_getdents(arg1, dirp, count));
10240             if (!is_error(ret)) {
10241                 struct linux_dirent *de;
10242 		struct target_dirent *tde;
10243                 int len = ret;
10244                 int reclen, treclen;
10245 		int count1, tnamelen;
10246 
10247 		count1 = 0;
10248                 de = dirp;
10249                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10250                     return -TARGET_EFAULT;
10251 		tde = target_dirp;
10252                 while (len > 0) {
10253                     reclen = de->d_reclen;
10254                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10255                     assert(tnamelen >= 0);
10256                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10257                     assert(count1 + treclen <= count);
10258                     tde->d_reclen = tswap16(treclen);
10259                     tde->d_ino = tswapal(de->d_ino);
10260                     tde->d_off = tswapal(de->d_off);
10261                     memcpy(tde->d_name, de->d_name, tnamelen);
10262                     de = (struct linux_dirent *)((char *)de + reclen);
10263                     len -= reclen;
10264                     tde = (struct target_dirent *)((char *)tde + treclen);
10265 		    count1 += treclen;
10266                 }
10267 		ret = count1;
10268                 unlock_user(target_dirp, arg2, ret);
10269             }
10270             g_free(dirp);
10271         }
10272 #else
10273         {
10274             struct linux_dirent *dirp;
10275             abi_long count = arg3;
10276 
10277             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10278                 return -TARGET_EFAULT;
10279             ret = get_errno(sys_getdents(arg1, dirp, count));
10280             if (!is_error(ret)) {
10281                 struct linux_dirent *de;
10282                 int len = ret;
10283                 int reclen;
10284                 de = dirp;
10285                 while (len > 0) {
10286                     reclen = de->d_reclen;
10287                     if (reclen > len)
10288                         break;
10289                     de->d_reclen = tswap16(reclen);
10290                     tswapls(&de->d_ino);
10291                     tswapls(&de->d_off);
10292                     de = (struct linux_dirent *)((char *)de + reclen);
10293                     len -= reclen;
10294                 }
10295             }
10296             unlock_user(dirp, arg2, ret);
10297         }
10298 #endif
10299 #else
10300         /* Implement getdents in terms of getdents64 */
10301         {
10302             struct linux_dirent64 *dirp;
10303             abi_long count = arg3;
10304 
10305             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10306             if (!dirp) {
10307                 return -TARGET_EFAULT;
10308             }
10309             ret = get_errno(sys_getdents64(arg1, dirp, count));
10310             if (!is_error(ret)) {
10311                 /* Convert the dirent64 structs to target dirent.  We do this
10312                  * in-place, since we can guarantee that a target_dirent is no
10313                  * larger than a dirent64; however this means we have to be
10314                  * careful to read everything before writing in the new format.
10315                  */
10316                 struct linux_dirent64 *de;
10317                 struct target_dirent *tde;
10318                 int len = ret;
10319                 int tlen = 0;
10320 
10321                 de = dirp;
10322                 tde = (struct target_dirent *)dirp;
10323                 while (len > 0) {
10324                     int namelen, treclen;
10325                     int reclen = de->d_reclen;
10326                     uint64_t ino = de->d_ino;
10327                     int64_t off = de->d_off;
10328                     uint8_t type = de->d_type;
10329 
10330                     namelen = strlen(de->d_name);
10331                     treclen = offsetof(struct target_dirent, d_name)
10332                         + namelen + 2;
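                    /* +2 covers the trailing NUL and the d_type byte */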
10333                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10334 
10335                     memmove(tde->d_name, de->d_name, namelen + 1);
10336                     tde->d_ino = tswapal(ino);
10337                     tde->d_off = tswapal(off);
10338                     tde->d_reclen = tswap16(treclen);
10339                     /* The target_dirent type is in what was formerly a padding
10340                      * byte at the end of the structure:
10341                      */
10342                     *(((char *)tde) + treclen - 1) = type;
10343 
10344                     de = (struct linux_dirent64 *)((char *)de + reclen);
10345                     tde = (struct target_dirent *)((char *)tde + treclen);
10346                     len -= reclen;
10347                     tlen += treclen;
10348                 }
10349                 ret = tlen;
10350             }
10351             unlock_user(dirp, arg2, ret);
10352         }
10353 #endif
10354         return ret;
10355 #endif /* TARGET_NR_getdents */
10356 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10357     case TARGET_NR_getdents64:
10358         {
10359             struct linux_dirent64 *dirp;
10360             abi_long count = arg3;
10361             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10362                 return -TARGET_EFAULT;
10363             ret = get_errno(sys_getdents64(arg1, dirp, count));
10364             if (!is_error(ret)) {
10365                 struct linux_dirent64 *de;
10366                 int len = ret;
10367                 int reclen;
10368                 de = dirp;
10369                 while (len > 0) {
10370                     reclen = de->d_reclen;
10371                     if (reclen > len)
10372                         break;
10373                     de->d_reclen = tswap16(reclen);
10374                     tswap64s((uint64_t *)&de->d_ino);
10375                     tswap64s((uint64_t *)&de->d_off);
10376                     de = (struct linux_dirent64 *)((char *)de + reclen);
10377                     len -= reclen;
10378                 }
10379             }
10380             unlock_user(dirp, arg2, ret);
10381         }
10382         return ret;
10383 #endif /* TARGET_NR_getdents64 */
10384 #if defined(TARGET_NR__newselect)
10385     case TARGET_NR__newselect:
10386         return do_select(arg1, arg2, arg3, arg4, arg5);
10387 #endif
10388 #ifdef TARGET_NR_poll
10389     case TARGET_NR_poll:
10390         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10391 #endif
10392 #ifdef TARGET_NR_ppoll
10393     case TARGET_NR_ppoll:
10394         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10395 #endif
10396 #ifdef TARGET_NR_ppoll_time64
10397     case TARGET_NR_ppoll_time64:
10398         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10399 #endif
10400     case TARGET_NR_flock:
10401         /* NOTE: the flock constant seems to be the same for every
10402            Linux platform */
10403         return get_errno(safe_flock(arg1, arg2));
10404     case TARGET_NR_readv:
10405         {
10406             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10407             if (vec != NULL) {
10408                 ret = get_errno(safe_readv(arg1, vec, arg3));
10409                 unlock_iovec(vec, arg2, arg3, 1);
10410             } else {
10411                 ret = -host_to_target_errno(errno);
10412             }
10413         }
10414         return ret;
10415     case TARGET_NR_writev:
10416         {
10417             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10418             if (vec != NULL) {
10419                 ret = get_errno(safe_writev(arg1, vec, arg3));
10420                 unlock_iovec(vec, arg2, arg3, 0);
10421             } else {
10422                 ret = -host_to_target_errno(errno);
10423             }
10424         }
10425         return ret;
10426 #if defined(TARGET_NR_preadv)
10427     case TARGET_NR_preadv:
10428         {
10429             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10430             if (vec != NULL) {
10431                 unsigned long low, high;
10432 
10433                 target_to_host_low_high(arg4, arg5, &low, &high);
10434                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10435                 unlock_iovec(vec, arg2, arg3, 1);
10436             } else {
10437                 ret = -host_to_target_errno(errno);
10438             }
10439         }
10440         return ret;
10441 #endif
10442 #if defined(TARGET_NR_pwritev)
10443     case TARGET_NR_pwritev:
10444         {
10445             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10446             if (vec != NULL) {
10447                 unsigned long low, high;
10448 
10449                 target_to_host_low_high(arg4, arg5, &low, &high);
10450                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10451                 unlock_iovec(vec, arg2, arg3, 0);
10452             } else {
10453                 ret = -host_to_target_errno(errno);
10454             }
10455         }
10456         return ret;
10457 #endif
10458     case TARGET_NR_getsid:
10459         return get_errno(getsid(arg1));
10460 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10461     case TARGET_NR_fdatasync:
10462         return get_errno(fdatasync(arg1));
10463 #endif
10464     case TARGET_NR_sched_getaffinity:
10465         {
10466             unsigned int mask_size;
10467             unsigned long *mask;
10468 
10469             /*
10470              * sched_getaffinity needs multiples of ulong, so we need to take
10471              * care of mismatches between target ulong and host ulong sizes.
10472              */
10473             if (arg2 & (sizeof(abi_ulong) - 1)) {
10474                 return -TARGET_EINVAL;
10475             }
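            /* Round the guest-supplied length up to whole host longs. */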
10476             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10477 
10478             mask = alloca(mask_size);
10479             memset(mask, 0, mask_size);
10480             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10481 
10482             if (!is_error(ret)) {
10483                 if (ret > arg2) {
10484                     /* More data returned than the caller's buffer will fit.
10485                      * This only happens if sizeof(abi_long) < sizeof(long)
10486                      * and the caller passed us a buffer holding an odd number
10487                      * of abi_longs. If the host kernel is actually using the
10488                      * extra 4 bytes then fail EINVAL; otherwise we can just
10489                      * ignore them and only copy the interesting part.
10490                      */
10491                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10492                     if (numcpus > arg2 * 8) {
10493                         return -TARGET_EINVAL;
10494                     }
10495                     ret = arg2;
10496                 }
10497 
10498                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10499                     return -TARGET_EFAULT;
10500                 }
10501             }
10502         }
10503         return ret;
10504     case TARGET_NR_sched_setaffinity:
10505         {
10506             unsigned int mask_size;
10507             unsigned long *mask;
10508 
10509             /*
10510              * sched_setaffinity needs multiples of ulong, so we need to take
10511              * care of mismatches between target ulong and host ulong sizes.
10512              */
10513             if (arg2 & (sizeof(abi_ulong) - 1)) {
10514                 return -TARGET_EINVAL;
10515             }
10516             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10517             mask = alloca(mask_size);
10518 
10519             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10520             if (ret) {
10521                 return ret;
10522             }
10523 
10524             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10525         }
10526     case TARGET_NR_getcpu:
10527         {
10528             unsigned cpu, node;
10529             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10530                                        arg2 ? &node : NULL,
10531                                        NULL));
10532             if (is_error(ret)) {
10533                 return ret;
10534             }
10535             if (arg1 && put_user_u32(cpu, arg1)) {
10536                 return -TARGET_EFAULT;
10537             }
10538             if (arg2 && put_user_u32(node, arg2)) {
10539                 return -TARGET_EFAULT;
10540             }
10541         }
10542         return ret;
10543     case TARGET_NR_sched_setparam:
10544         {
10545             struct sched_param *target_schp;
10546             struct sched_param schp;
10547 
10548             if (arg2 == 0) {
10549                 return -TARGET_EINVAL;
10550             }
10551             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10552                 return -TARGET_EFAULT;
10553             schp.sched_priority = tswap32(target_schp->sched_priority);
10554             unlock_user_struct(target_schp, arg2, 0);
10555             return get_errno(sched_setparam(arg1, &schp));
10556         }
10557     case TARGET_NR_sched_getparam:
10558         {
10559             struct sched_param *target_schp;
10560             struct sched_param schp;
10561 
10562             if (arg2 == 0) {
10563                 return -TARGET_EINVAL;
10564             }
10565             ret = get_errno(sched_getparam(arg1, &schp));
10566             if (!is_error(ret)) {
10567                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10568                     return -TARGET_EFAULT;
10569                 target_schp->sched_priority = tswap32(schp.sched_priority);
10570                 unlock_user_struct(target_schp, arg2, 1);
10571             }
10572         }
10573         return ret;
10574     case TARGET_NR_sched_setscheduler:
10575         {
10576             struct sched_param *target_schp;
10577             struct sched_param schp;
10578             if (arg3 == 0) {
10579                 return -TARGET_EINVAL;
10580             }
10581             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10582                 return -TARGET_EFAULT;
10583             schp.sched_priority = tswap32(target_schp->sched_priority);
10584             unlock_user_struct(target_schp, arg3, 0);
10585             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10586         }
10587     case TARGET_NR_sched_getscheduler:
10588         return get_errno(sched_getscheduler(arg1));
10589     case TARGET_NR_sched_yield:
10590         return get_errno(sched_yield());
10591     case TARGET_NR_sched_get_priority_max:
10592         return get_errno(sched_get_priority_max(arg1));
10593     case TARGET_NR_sched_get_priority_min:
10594         return get_errno(sched_get_priority_min(arg1));
10595 #ifdef TARGET_NR_sched_rr_get_interval
10596     case TARGET_NR_sched_rr_get_interval:
10597         {
10598             struct timespec ts;
10599             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10600             if (!is_error(ret)) {
10601                 ret = host_to_target_timespec(arg2, &ts);
10602             }
10603         }
10604         return ret;
10605 #endif
10606 #ifdef TARGET_NR_sched_rr_get_interval_time64
10607     case TARGET_NR_sched_rr_get_interval_time64:
10608         {
10609             struct timespec ts;
10610             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10611             if (!is_error(ret)) {
10612                 ret = host_to_target_timespec64(arg2, &ts);
10613             }
10614         }
10615         return ret;
10616 #endif
10617 #if defined(TARGET_NR_nanosleep)
10618     case TARGET_NR_nanosleep:
10619         {
10620             struct timespec req, rem;
10621             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10622             ret = get_errno(safe_nanosleep(&req, &rem));
10623             if (is_error(ret) && arg2 &&
                      host_to_target_timespec(arg2, &rem)) {
10624                 return -TARGET_EFAULT;
10625             }
10626         }
10627         return ret;
10628 #endif
10629     case TARGET_NR_prctl:
10630         switch (arg1) {
10631         case PR_GET_PDEATHSIG:
10632         {
10633             int deathsig;
10634             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10635             if (!is_error(ret) && arg2
10636                 && put_user_s32(deathsig, arg2)) {
10637                 return -TARGET_EFAULT;
10638             }
10639             return ret;
10640         }
10641 #ifdef PR_GET_NAME
10642         case PR_GET_NAME:
10643         {
10644             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10645             if (!name) {
10646                 return -TARGET_EFAULT;
10647             }
10648             ret = get_errno(prctl(arg1, (unsigned long)name,
10649                                   arg3, arg4, arg5));
10650             unlock_user(name, arg2, 16);
10651             return ret;
10652         }
10653         case PR_SET_NAME:
10654         {
10655             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10656             if (!name) {
10657                 return -TARGET_EFAULT;
10658             }
10659             ret = get_errno(prctl(arg1, (unsigned long)name,
10660                                   arg3, arg4, arg5));
10661             unlock_user(name, arg2, 0);
10662             return ret;
10663         }
10664 #endif
10665 #ifdef TARGET_MIPS
10666         case TARGET_PR_GET_FP_MODE:
10667         {
10668             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10669             ret = 0;
10670             if (env->CP0_Status & (1 << CP0St_FR)) {
10671                 ret |= TARGET_PR_FP_MODE_FR;
10672             }
10673             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10674                 ret |= TARGET_PR_FP_MODE_FRE;
10675             }
10676             return ret;
10677         }
10678         case TARGET_PR_SET_FP_MODE:
10679         {
10680             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10681             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10682             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10683             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10684             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10685 
10686             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10687                                             TARGET_PR_FP_MODE_FRE;
10688 
10689             /* If there is nothing to change, return right away, successfully.  */
10690             if (old_fr == new_fr && old_fre == new_fre) {
10691                 return 0;
10692             }
10693             /* Check the value is valid */
10694             if (arg2 & ~known_bits) {
10695                 return -TARGET_EOPNOTSUPP;
10696             }
10697             /* Setting FRE without FR is not supported.  */
10698             if (new_fre && !new_fr) {
10699                 return -TARGET_EOPNOTSUPP;
10700             }
10701             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10702                 /* FR1 is not supported */
10703                 return -TARGET_EOPNOTSUPP;
10704             }
10705             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10706                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10707                 /* cannot set FR=0 */
10708                 return -TARGET_EOPNOTSUPP;
10709             }
10710             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10711                 /* Cannot set FRE=1 */
10712                 return -TARGET_EOPNOTSUPP;
10713             }
10714 
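                  /*
                   * Changing FR also changes where odd-numbered single-precision
                   * registers live: move each odd single between its own register
                   * slot and the upper half of its even-numbered pair so the
                   * guest-visible values are preserved across the mode switch.
                   */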
10715             int i;
10716             fpr_t *fpr = env->active_fpu.fpr;
10717             for (i = 0; i < 32 ; i += 2) {
10718                 if (!old_fr && new_fr) {
10719                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10720                 } else if (old_fr && !new_fr) {
10721                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10722                 }
10723             }
10724 
10725             if (new_fr) {
10726                 env->CP0_Status |= (1 << CP0St_FR);
10727                 env->hflags |= MIPS_HFLAG_F64;
10728             } else {
10729                 env->CP0_Status &= ~(1 << CP0St_FR);
10730                 env->hflags &= ~MIPS_HFLAG_F64;
10731             }
10732             if (new_fre) {
10733                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10734                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10735                     env->hflags |= MIPS_HFLAG_FRE;
10736                 }
10737             } else {
10738                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10739                 env->hflags &= ~MIPS_HFLAG_FRE;
10740             }
10741 
10742             return 0;
10743         }
10744 #endif /* MIPS */
10745 #ifdef TARGET_AARCH64
10746         case TARGET_PR_SVE_SET_VL:
10747             /*
10748              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10749              * PR_SVE_VL_INHERIT.  Note the kernel definition
10750              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10751              * even though the current architectural maximum is VQ=16.
10752              */
10753             ret = -TARGET_EINVAL;
10754             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10755                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10756                 CPUARMState *env = cpu_env;
10757                 ARMCPU *cpu = env_archcpu(env);
10758                 uint32_t vq, old_vq;
10759 
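                      /*
                       * ZCR_EL1.LEN holds (vq - 1), where vq is the vector length
                       * in quadwords; the byte length used by PR_SVE_SET_VL is
                       * vq * 16.  Clamp the request to the CPU's maximum vq and
                       * narrow the live registers if the length shrinks.
                       */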
10760                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10761                 vq = MAX(arg2 / 16, 1);
10762                 vq = MIN(vq, cpu->sve_max_vq);
10763 
10764                 if (vq < old_vq) {
10765                     aarch64_sve_narrow_vq(env, vq);
10766                 }
10767                 env->vfp.zcr_el[1] = vq - 1;
10768                 arm_rebuild_hflags(env);
10769                 ret = vq * 16;
10770             }
10771             return ret;
10772         case TARGET_PR_SVE_GET_VL:
10773             ret = -TARGET_EINVAL;
10774             {
10775                 ARMCPU *cpu = env_archcpu(cpu_env);
10776                 if (cpu_isar_feature(aa64_sve, cpu)) {
10777                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10778                 }
10779             }
10780             return ret;
10781         case TARGET_PR_PAC_RESET_KEYS:
10782             {
10783                 CPUARMState *env = cpu_env;
10784                 ARMCPU *cpu = env_archcpu(env);
10785 
10786                 if (arg3 || arg4 || arg5) {
10787                     return -TARGET_EINVAL;
10788                 }
10789                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10790                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10791                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10792                                TARGET_PR_PAC_APGAKEY);
10793                     int ret = 0;
10794                     Error *err = NULL;
10795 
10796                     if (arg2 == 0) {
10797                         arg2 = all;
10798                     } else if (arg2 & ~all) {
10799                         return -TARGET_EINVAL;
10800                     }
10801                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10802                         ret |= qemu_guest_getrandom(&env->keys.apia,
10803                                                     sizeof(ARMPACKey), &err);
10804                     }
10805                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10806                         ret |= qemu_guest_getrandom(&env->keys.apib,
10807                                                     sizeof(ARMPACKey), &err);
10808                     }
10809                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10810                         ret |= qemu_guest_getrandom(&env->keys.apda,
10811                                                     sizeof(ARMPACKey), &err);
10812                     }
10813                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10814                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10815                                                     sizeof(ARMPACKey), &err);
10816                     }
10817                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10818                         ret |= qemu_guest_getrandom(&env->keys.apga,
10819                                                     sizeof(ARMPACKey), &err);
10820                     }
10821                     if (ret != 0) {
10822                         /*
10823                          * Some unknown failure in the crypto.  The best
10824                          * we can do is log it and fail the syscall.
10825                          * The real syscall cannot fail this way.
10826                          */
10827                         qemu_log_mask(LOG_UNIMP,
10828                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10829                                       error_get_pretty(err));
10830                         error_free(err);
10831                         return -TARGET_EIO;
10832                     }
10833                     return 0;
10834                 }
10835             }
10836             return -TARGET_EINVAL;
10837         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10838             {
10839                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10840                 CPUARMState *env = cpu_env;
10841                 ARMCPU *cpu = env_archcpu(env);
10842 
10843                 if (cpu_isar_feature(aa64_mte, cpu)) {
10844                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10845                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10846                 }
10847 
10848                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10849                     return -TARGET_EINVAL;
10850                 }
10851                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10852 
10853                 if (cpu_isar_feature(aa64_mte, cpu)) {
10854                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10855                     case TARGET_PR_MTE_TCF_NONE:
10856                     case TARGET_PR_MTE_TCF_SYNC:
10857                     case TARGET_PR_MTE_TCF_ASYNC:
10858                         break;
10859                     default:
10860                         return -TARGET_EINVAL;
10861                     }
10862 
10863                     /*
10864                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10865                      * Note that the syscall values are consistent with hw.
10866                      */
10867                     env->cp15.sctlr_el[1] =
10868                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10869                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10870 
10871                     /*
10872                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10873                      * Note that the syscall uses an include mask,
10874                      * and hardware uses an exclude mask -- invert.
10875                      */
10876                     env->cp15.gcr_el1 =
10877                         deposit64(env->cp15.gcr_el1, 0, 16,
10878                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10879                     arm_rebuild_hflags(env);
10880                 }
10881                 return 0;
10882             }
10883         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10884             {
10885                 abi_long ret = 0;
10886                 CPUARMState *env = cpu_env;
10887                 ARMCPU *cpu = env_archcpu(env);
10888 
10889                 if (arg2 || arg3 || arg4 || arg5) {
10890                     return -TARGET_EINVAL;
10891                 }
10892                 if (env->tagged_addr_enable) {
10893                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10894                 }
10895                 if (cpu_isar_feature(aa64_mte, cpu)) {
10896                     /* See above. */
10897                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10898                             << TARGET_PR_MTE_TCF_SHIFT);
10899                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10900                                     ~env->cp15.gcr_el1);
10901                 }
10902                 return ret;
10903             }
10904 #endif /* AARCH64 */
10905         case PR_GET_SECCOMP:
10906         case PR_SET_SECCOMP:
10907             /* Disable seccomp to prevent the target disabling syscalls we
10908              * need. */
10909             return -TARGET_EINVAL;
10910         default:
10911             /* Most prctl options have no pointer arguments */
10912             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10913         }
10914         break;
10915 #ifdef TARGET_NR_arch_prctl
10916     case TARGET_NR_arch_prctl:
10917         return do_arch_prctl(cpu_env, arg1, arg2);
10918 #endif
10919 #ifdef TARGET_NR_pread64
10920     case TARGET_NR_pread64:
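              /*
               * Some 32-bit ABIs pass 64-bit syscall arguments in aligned
               * register pairs, leaving an unused slot before the offset, so
               * its two halves are taken from arg5/arg6 instead of arg4/arg5
               * and reassembled by target_offset64().  The same applies to
               * pwrite64 below.
               */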
10921         if (regpairs_aligned(cpu_env, num)) {
10922             arg4 = arg5;
10923             arg5 = arg6;
10924         }
10925         if (arg2 == 0 && arg3 == 0) {
10926             /* Special-case NULL buffer and zero length, which should succeed */
10927             p = 0;
10928         } else {
10929             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10930             if (!p) {
10931                 return -TARGET_EFAULT;
10932             }
10933         }
10934         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10935         unlock_user(p, arg2, ret);
10936         return ret;
10937     case TARGET_NR_pwrite64:
10938         if (regpairs_aligned(cpu_env, num)) {
10939             arg4 = arg5;
10940             arg5 = arg6;
10941         }
10942         if (arg2 == 0 && arg3 == 0) {
10943             /* Special-case NULL buffer and zero length, which should succeed */
10944             p = 0;
10945         } else {
10946             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10947             if (!p) {
10948                 return -TARGET_EFAULT;
10949             }
10950         }
10951         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10952         unlock_user(p, arg2, 0);
10953         return ret;
10954 #endif
10955     case TARGET_NR_getcwd:
10956         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10957             return -TARGET_EFAULT;
10958         ret = get_errno(sys_getcwd1(p, arg2));
10959         unlock_user(p, arg1, ret);
10960         return ret;
10961     case TARGET_NR_capget:
10962     case TARGET_NR_capset:
10963     {
10964         struct target_user_cap_header *target_header;
10965         struct target_user_cap_data *target_data = NULL;
10966         struct __user_cap_header_struct header;
10967         struct __user_cap_data_struct data[2];
10968         struct __user_cap_data_struct *dataptr = NULL;
10969         int i, target_datalen;
10970         int data_items = 1;
10971 
10972         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10973             return -TARGET_EFAULT;
10974         }
10975         header.version = tswap32(target_header->version);
10976         header.pid = tswap32(target_header->pid);
10977 
10978         if (header.version != _LINUX_CAPABILITY_VERSION) {
10979             /* Version 2 and up takes pointer to two user_data structs */
10980             /* Versions 2 and up take a pointer to two user_data structs */
10981         }
10982 
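              /*
               * _LINUX_CAPABILITY_VERSION is the v1 ABI, which uses a single
               * 32-bit capability set; the v2/v3 ABIs describe 64 capability
               * bits split across two user_cap_data structs, hence two items.
               */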
10983         target_datalen = sizeof(*target_data) * data_items;
10984 
10985         if (arg2) {
10986             if (num == TARGET_NR_capget) {
10987                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10988             } else {
10989                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10990             }
10991             if (!target_data) {
10992                 unlock_user_struct(target_header, arg1, 0);
10993                 return -TARGET_EFAULT;
10994             }
10995 
10996             if (num == TARGET_NR_capset) {
10997                 for (i = 0; i < data_items; i++) {
10998                     data[i].effective = tswap32(target_data[i].effective);
10999                     data[i].permitted = tswap32(target_data[i].permitted);
11000                     data[i].inheritable = tswap32(target_data[i].inheritable);
11001                 }
11002             }
11003 
11004             dataptr = data;
11005         }
11006 
11007         if (num == TARGET_NR_capget) {
11008             ret = get_errno(capget(&header, dataptr));
11009         } else {
11010             ret = get_errno(capset(&header, dataptr));
11011         }
11012 
11013         /* The kernel always updates version for both capget and capset */
11014         target_header->version = tswap32(header.version);
11015         unlock_user_struct(target_header, arg1, 1);
11016 
11017         if (arg2) {
11018             if (num == TARGET_NR_capget) {
11019                 for (i = 0; i < data_items; i++) {
11020                     target_data[i].effective = tswap32(data[i].effective);
11021                     target_data[i].permitted = tswap32(data[i].permitted);
11022                     target_data[i].inheritable = tswap32(data[i].inheritable);
11023                 }
11024                 unlock_user(target_data, arg2, target_datalen);
11025             } else {
11026                 unlock_user(target_data, arg2, 0);
11027             }
11028         }
11029         return ret;
11030     }
11031     case TARGET_NR_sigaltstack:
11032         return do_sigaltstack(arg1, arg2, cpu_env);
11033 
11034 #ifdef CONFIG_SENDFILE
11035 #ifdef TARGET_NR_sendfile
11036     case TARGET_NR_sendfile:
11037     {
11038         off_t *offp = NULL;
11039         off_t off;
11040         if (arg3) {
11041             ret = get_user_sal(off, arg3);
11042             if (is_error(ret)) {
11043                 return ret;
11044             }
11045             offp = &off;
11046         }
11047         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11048         if (!is_error(ret) && arg3) {
11049             abi_long ret2 = put_user_sal(off, arg3);
11050             if (is_error(ret2)) {
11051                 ret = ret2;
11052             }
11053         }
11054         return ret;
11055     }
11056 #endif
11057 #ifdef TARGET_NR_sendfile64
11058     case TARGET_NR_sendfile64:
11059     {
11060         off_t *offp = NULL;
11061         off_t off;
11062         if (arg3) {
11063             ret = get_user_s64(off, arg3);
11064             if (is_error(ret)) {
11065                 return ret;
11066             }
11067             offp = &off;
11068         }
11069         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11070         if (!is_error(ret) && arg3) {
11071             abi_long ret2 = put_user_s64(off, arg3);
11072             if (is_error(ret2)) {
11073                 ret = ret2;
11074             }
11075         }
11076         return ret;
11077     }
11078 #endif
11079 #endif
11080 #ifdef TARGET_NR_vfork
11081     case TARGET_NR_vfork:
11082         return get_errno(do_fork(cpu_env,
11083                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11084                          0, 0, 0, 0));
11085 #endif
11086 #ifdef TARGET_NR_ugetrlimit
11087     case TARGET_NR_ugetrlimit:
11088     {
11089         struct rlimit rlim;
11090         int resource = target_to_host_resource(arg1);
11091         ret = get_errno(getrlimit(resource, &rlim));
11092         if (!is_error(ret)) {
11093             struct target_rlimit *target_rlim;
11094             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11095                 return -TARGET_EFAULT;
11096             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11097             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11098             unlock_user_struct(target_rlim, arg2, 1);
11099         }
11100         return ret;
11101     }
11102 #endif
11103 #ifdef TARGET_NR_truncate64
11104     case TARGET_NR_truncate64:
11105         if (!(p = lock_user_string(arg1)))
11106             return -TARGET_EFAULT;
11107         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11108         unlock_user(p, arg1, 0);
11109         return ret;
11110 #endif
11111 #ifdef TARGET_NR_ftruncate64
11112     case TARGET_NR_ftruncate64:
11113         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11114 #endif
11115 #ifdef TARGET_NR_stat64
11116     case TARGET_NR_stat64:
11117         if (!(p = lock_user_string(arg1))) {
11118             return -TARGET_EFAULT;
11119         }
11120         ret = get_errno(stat(path(p), &st));
11121         unlock_user(p, arg1, 0);
11122         if (!is_error(ret))
11123             ret = host_to_target_stat64(cpu_env, arg2, &st);
11124         return ret;
11125 #endif
11126 #ifdef TARGET_NR_lstat64
11127     case TARGET_NR_lstat64:
11128         if (!(p = lock_user_string(arg1))) {
11129             return -TARGET_EFAULT;
11130         }
11131         ret = get_errno(lstat(path(p), &st));
11132         unlock_user(p, arg1, 0);
11133         if (!is_error(ret))
11134             ret = host_to_target_stat64(cpu_env, arg2, &st);
11135         return ret;
11136 #endif
11137 #ifdef TARGET_NR_fstat64
11138     case TARGET_NR_fstat64:
11139         ret = get_errno(fstat(arg1, &st));
11140         if (!is_error(ret))
11141             ret = host_to_target_stat64(cpu_env, arg2, &st);
11142         return ret;
11143 #endif
11144 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11145 #ifdef TARGET_NR_fstatat64
11146     case TARGET_NR_fstatat64:
11147 #endif
11148 #ifdef TARGET_NR_newfstatat
11149     case TARGET_NR_newfstatat:
11150 #endif
11151         if (!(p = lock_user_string(arg2))) {
11152             return -TARGET_EFAULT;
11153         }
11154         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11155         unlock_user(p, arg2, 0);
11156         if (!is_error(ret))
11157             ret = host_to_target_stat64(cpu_env, arg3, &st);
11158         return ret;
11159 #endif
11160 #if defined(TARGET_NR_statx)
11161     case TARGET_NR_statx:
11162         {
11163             struct target_statx *target_stx;
11164             int dirfd = arg1;
11165             int flags = arg3;
11166 
11167             p = lock_user_string(arg2);
11168             if (p == NULL) {
11169                 return -TARGET_EFAULT;
11170             }
11171 #if defined(__NR_statx)
11172             {
11173                 /*
11174                  * It is assumed that struct statx is architecture independent.
11175                  */
11176                 struct target_statx host_stx;
11177                 int mask = arg4;
11178 
11179                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11180                 if (!is_error(ret)) {
11181                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11182                         unlock_user(p, arg2, 0);
11183                         return -TARGET_EFAULT;
11184                     }
11185                 }
11186 
11187                 if (ret != -TARGET_ENOSYS) {
11188                     unlock_user(p, arg2, 0);
11189                     return ret;
11190                 }
11191             }
11192 #endif
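                  /*
                   * Either the host has no statx() syscall or it returned
                   * ENOSYS: fall back to fstatat() and fill in the common
                   * statx fields from struct stat by hand.
                   */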
11193             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11194             unlock_user(p, arg2, 0);
11195 
11196             if (!is_error(ret)) {
11197                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11198                     return -TARGET_EFAULT;
11199                 }
11200                 memset(target_stx, 0, sizeof(*target_stx));
11201                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11202                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11203                 __put_user(st.st_ino, &target_stx->stx_ino);
11204                 __put_user(st.st_mode, &target_stx->stx_mode);
11205                 __put_user(st.st_uid, &target_stx->stx_uid);
11206                 __put_user(st.st_gid, &target_stx->stx_gid);
11207                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11208                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11209                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11210                 __put_user(st.st_size, &target_stx->stx_size);
11211                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11212                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11213                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11214                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11215                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11216                 unlock_user_struct(target_stx, arg5, 1);
11217             }
11218         }
11219         return ret;
11220 #endif
11221 #ifdef TARGET_NR_lchown
11222     case TARGET_NR_lchown:
11223         if (!(p = lock_user_string(arg1)))
11224             return -TARGET_EFAULT;
11225         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11226         unlock_user(p, arg1, 0);
11227         return ret;
11228 #endif
11229 #ifdef TARGET_NR_getuid
11230     case TARGET_NR_getuid:
11231         return get_errno(high2lowuid(getuid()));
11232 #endif
11233 #ifdef TARGET_NR_getgid
11234     case TARGET_NR_getgid:
11235         return get_errno(high2lowgid(getgid()));
11236 #endif
11237 #ifdef TARGET_NR_geteuid
11238     case TARGET_NR_geteuid:
11239         return get_errno(high2lowuid(geteuid()));
11240 #endif
11241 #ifdef TARGET_NR_getegid
11242     case TARGET_NR_getegid:
11243         return get_errno(high2lowgid(getegid()));
11244 #endif
11245     case TARGET_NR_setreuid:
11246         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11247     case TARGET_NR_setregid:
11248         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11249     case TARGET_NR_getgroups:
11250         {
11251             int gidsetsize = arg1;
11252             target_id *target_grouplist;
11253             gid_t *grouplist;
11254             int i;
11255 
11256             grouplist = alloca(gidsetsize * sizeof(gid_t));
11257             ret = get_errno(getgroups(gidsetsize, grouplist));
11258             if (gidsetsize == 0)
11259                 return ret;
11260             if (!is_error(ret)) {
11261                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11262                 if (!target_grouplist)
11263                     return -TARGET_EFAULT;
11264                 for (i = 0; i < ret; i++)
11265                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11266                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11267             }
11268         }
11269         return ret;
11270     case TARGET_NR_setgroups:
11271         {
11272             int gidsetsize = arg1;
11273             target_id *target_grouplist;
11274             gid_t *grouplist = NULL;
11275             int i;
11276             if (gidsetsize) {
11277                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11278                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11279                 if (!target_grouplist) {
11280                     return -TARGET_EFAULT;
11281                 }
11282                 for (i = 0; i < gidsetsize; i++) {
11283                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11284                 }
11285                 unlock_user(target_grouplist, arg2, 0);
11286             }
11287             return get_errno(setgroups(gidsetsize, grouplist));
11288         }
11289     case TARGET_NR_fchown:
11290         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11291 #if defined(TARGET_NR_fchownat)
11292     case TARGET_NR_fchownat:
11293         if (!(p = lock_user_string(arg2)))
11294             return -TARGET_EFAULT;
11295         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11296                                  low2highgid(arg4), arg5));
11297         unlock_user(p, arg2, 0);
11298         return ret;
11299 #endif
11300 #ifdef TARGET_NR_setresuid
11301     case TARGET_NR_setresuid:
11302         return get_errno(sys_setresuid(low2highuid(arg1),
11303                                        low2highuid(arg2),
11304                                        low2highuid(arg3)));
11305 #endif
11306 #ifdef TARGET_NR_getresuid
11307     case TARGET_NR_getresuid:
11308         {
11309             uid_t ruid, euid, suid;
11310             ret = get_errno(getresuid(&ruid, &euid, &suid));
11311             if (!is_error(ret)) {
11312                 if (put_user_id(high2lowuid(ruid), arg1)
11313                     || put_user_id(high2lowuid(euid), arg2)
11314                     || put_user_id(high2lowuid(suid), arg3))
11315                     return -TARGET_EFAULT;
11316             }
11317         }
11318         return ret;
11319 #endif
11320 #ifdef TARGET_NR_getresgid
11321     case TARGET_NR_setresgid:
11322         return get_errno(sys_setresgid(low2highgid(arg1),
11323                                        low2highgid(arg2),
11324                                        low2highgid(arg3)));
11325 #endif
11326 #ifdef TARGET_NR_getresgid
11327     case TARGET_NR_getresgid:
11328         {
11329             gid_t rgid, egid, sgid;
11330             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11331             if (!is_error(ret)) {
11332                 if (put_user_id(high2lowgid(rgid), arg1)
11333                     || put_user_id(high2lowgid(egid), arg2)
11334                     || put_user_id(high2lowgid(sgid), arg3))
11335                     return -TARGET_EFAULT;
11336             }
11337         }
11338         return ret;
11339 #endif
11340 #ifdef TARGET_NR_chown
11341     case TARGET_NR_chown:
11342         if (!(p = lock_user_string(arg1)))
11343             return -TARGET_EFAULT;
11344         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11345         unlock_user(p, arg1, 0);
11346         return ret;
11347 #endif
11348     case TARGET_NR_setuid:
11349         return get_errno(sys_setuid(low2highuid(arg1)));
11350     case TARGET_NR_setgid:
11351         return get_errno(sys_setgid(low2highgid(arg1)));
11352     case TARGET_NR_setfsuid:
11353         return get_errno(setfsuid(arg1));
11354     case TARGET_NR_setfsgid:
11355         return get_errno(setfsgid(arg1));
11356 
11357 #ifdef TARGET_NR_lchown32
11358     case TARGET_NR_lchown32:
11359         if (!(p = lock_user_string(arg1)))
11360             return -TARGET_EFAULT;
11361         ret = get_errno(lchown(p, arg2, arg3));
11362         unlock_user(p, arg1, 0);
11363         return ret;
11364 #endif
11365 #ifdef TARGET_NR_getuid32
11366     case TARGET_NR_getuid32:
11367         return get_errno(getuid());
11368 #endif
11369 
11370 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11371    /* Alpha specific */
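          /*
           * Alpha getxuid returns the real ID in v0 and the effective ID in
           * a4, which the kernel uses as a secondary return register.
           */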
11372     case TARGET_NR_getxuid:
11373          {
11374             uid_t euid;
11375             euid = geteuid();
11376             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11377          }
11378         return get_errno(getuid());
11379 #endif
11380 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11381    /* Alpha specific */
11382     case TARGET_NR_getxgid:
11383          {
11384             gid_t egid;
11385             egid = getegid();
11386             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11387          }
11388         return get_errno(getgid());
11389 #endif
11390 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11391     /* Alpha specific */
11392     case TARGET_NR_osf_getsysinfo:
11393         ret = -TARGET_EOPNOTSUPP;
11394         switch (arg1) {
11395           case TARGET_GSI_IEEE_FP_CONTROL:
11396             {
11397                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11398                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11399 
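                      /*
                       * The live exception status bits are kept only in the
                       * hardware FPCR; fold them (shifted down by 35 to match
                       * the SWCR layout) into the saved software completion
                       * word before reporting it to the guest.
                       */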
11400                 swcr &= ~SWCR_STATUS_MASK;
11401                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11402 
11403                 if (put_user_u64 (swcr, arg2))
11404                         return -TARGET_EFAULT;
11405                 ret = 0;
11406             }
11407             break;
11408 
11409           /* case GSI_IEEE_STATE_AT_SIGNAL:
11410              -- Not implemented in linux kernel.
11411              case GSI_UACPROC:
11412              -- Retrieves current unaligned access state; not much used.
11413              case GSI_PROC_TYPE:
11414              -- Retrieves implver information; surely not used.
11415              case GSI_GET_HWRPB:
11416              -- Grabs a copy of the HWRPB; surely not used.
11417           */
11418         }
11419         return ret;
11420 #endif
11421 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11422     /* Alpha specific */
11423     case TARGET_NR_osf_setsysinfo:
11424         ret = -TARGET_EOPNOTSUPP;
11425         switch (arg1) {
11426           case TARGET_SSI_IEEE_FP_CONTROL:
11427             {
11428                 uint64_t swcr, fpcr;
11429 
11430                 if (get_user_u64 (swcr, arg2)) {
11431                     return -TARGET_EFAULT;
11432                 }
11433 
11434                 /*
11435                  * The kernel calls swcr_update_status to update the
11436                  * status bits from the fpcr at every point that it
11437                  * could be queried.  Therefore, we store the status
11438                  * bits only in FPCR.
11439                  */
11440                 ((CPUAlphaState *)cpu_env)->swcr
11441                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11442 
11443                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11444                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11445                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11446                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11447                 ret = 0;
11448             }
11449             break;
11450 
11451           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11452             {
11453                 uint64_t exc, fpcr, fex;
11454 
11455                 if (get_user_u64(exc, arg2)) {
11456                     return -TARGET_EFAULT;
11457                 }
11458                 exc &= SWCR_STATUS_MASK;
11459                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11460 
11461                 /* Old exceptions are not signaled.  */
11462                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11463                 fex = exc & ~fex;
11464                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11465                 fex &= ((CPUArchState *)cpu_env)->swcr;
11466 
11467                 /* Update the hardware fpcr.  */
11468                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11469                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11470 
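                      /*
                       * fex now holds the newly raised exceptions for which the
                       * guest has traps enabled; if any remain, deliver SIGFPE
                       * with an si_code chosen by the checks below.
                       */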
11471                 if (fex) {
11472                     int si_code = TARGET_FPE_FLTUNK;
11473                     target_siginfo_t info;
11474 
11475                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11476                         si_code = TARGET_FPE_FLTUND;
11477                     }
11478                     if (fex & SWCR_TRAP_ENABLE_INE) {
11479                         si_code = TARGET_FPE_FLTRES;
11480                     }
11481                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11482                         si_code = TARGET_FPE_FLTUND;
11483                     }
11484                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11485                         si_code = TARGET_FPE_FLTOVF;
11486                     }
11487                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11488                         si_code = TARGET_FPE_FLTDIV;
11489                     }
11490                     if (fex & SWCR_TRAP_ENABLE_INV) {
11491                         si_code = TARGET_FPE_FLTINV;
11492                     }
11493 
11494                     info.si_signo = SIGFPE;
11495                     info.si_errno = 0;
11496                     info.si_code = si_code;
11497                     info._sifields._sigfault._addr
11498                         = ((CPUArchState *)cpu_env)->pc;
11499                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11500                                  QEMU_SI_FAULT, &info);
11501                 }
11502                 ret = 0;
11503             }
11504             break;
11505 
11506           /* case SSI_NVPAIRS:
11507              -- Used with SSIN_UACPROC to enable unaligned accesses.
11508              case SSI_IEEE_STATE_AT_SIGNAL:
11509              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11510              -- Not implemented in linux kernel
11511           */
11512         }
11513         return ret;
11514 #endif
11515 #ifdef TARGET_NR_osf_sigprocmask
11516     /* Alpha specific.  */
11517     case TARGET_NR_osf_sigprocmask:
11518         {
11519             abi_ulong mask;
11520             int how;
11521             sigset_t set, oldset;
11522 
11523             switch(arg1) {
11524             case TARGET_SIG_BLOCK:
11525                 how = SIG_BLOCK;
11526                 break;
11527             case TARGET_SIG_UNBLOCK:
11528                 how = SIG_UNBLOCK;
11529                 break;
11530             case TARGET_SIG_SETMASK:
11531                 how = SIG_SETMASK;
11532                 break;
11533             default:
11534                 return -TARGET_EINVAL;
11535             }
11536             mask = arg2;
11537             target_to_host_old_sigset(&set, &mask);
11538             ret = do_sigprocmask(how, &set, &oldset);
11539             if (!ret) {
11540                 host_to_target_old_sigset(&mask, &oldset);
11541                 ret = mask;
11542             }
11543         }
11544         return ret;
11545 #endif
11546 
11547 #ifdef TARGET_NR_getgid32
11548     case TARGET_NR_getgid32:
11549         return get_errno(getgid());
11550 #endif
11551 #ifdef TARGET_NR_geteuid32
11552     case TARGET_NR_geteuid32:
11553         return get_errno(geteuid());
11554 #endif
11555 #ifdef TARGET_NR_getegid32
11556     case TARGET_NR_getegid32:
11557         return get_errno(getegid());
11558 #endif
11559 #ifdef TARGET_NR_setreuid32
11560     case TARGET_NR_setreuid32:
11561         return get_errno(setreuid(arg1, arg2));
11562 #endif
11563 #ifdef TARGET_NR_setregid32
11564     case TARGET_NR_setregid32:
11565         return get_errno(setregid(arg1, arg2));
11566 #endif
11567 #ifdef TARGET_NR_getgroups32
11568     case TARGET_NR_getgroups32:
11569         {
11570             int gidsetsize = arg1;
11571             uint32_t *target_grouplist;
11572             gid_t *grouplist;
11573             int i;
11574 
11575             grouplist = alloca(gidsetsize * sizeof(gid_t));
11576             ret = get_errno(getgroups(gidsetsize, grouplist));
11577             if (gidsetsize == 0)
11578                 return ret;
11579             if (!is_error(ret)) {
11580                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11581                 if (!target_grouplist) {
11582                     return -TARGET_EFAULT;
11583                 }
11584                 for (i = 0; i < ret; i++)
11585                     target_grouplist[i] = tswap32(grouplist[i]);
11586                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11587             }
11588         }
11589         return ret;
11590 #endif
11591 #ifdef TARGET_NR_setgroups32
11592     case TARGET_NR_setgroups32:
11593         {
11594             int gidsetsize = arg1;
11595             uint32_t *target_grouplist;
11596             gid_t *grouplist;
11597             int i;
11598 
11599             grouplist = alloca(gidsetsize * sizeof(gid_t));
11600             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11601             if (!target_grouplist) {
11602                 return -TARGET_EFAULT;
11603             }
11604             for (i = 0; i < gidsetsize; i++)
11605                 grouplist[i] = tswap32(target_grouplist[i]);
11606             unlock_user(target_grouplist, arg2, 0);
11607             return get_errno(setgroups(gidsetsize, grouplist));
11608         }
11609 #endif
11610 #ifdef TARGET_NR_fchown32
11611     case TARGET_NR_fchown32:
11612         return get_errno(fchown(arg1, arg2, arg3));
11613 #endif
11614 #ifdef TARGET_NR_setresuid32
11615     case TARGET_NR_setresuid32:
11616         return get_errno(sys_setresuid(arg1, arg2, arg3));
11617 #endif
11618 #ifdef TARGET_NR_getresuid32
11619     case TARGET_NR_getresuid32:
11620         {
11621             uid_t ruid, euid, suid;
11622             ret = get_errno(getresuid(&ruid, &euid, &suid));
11623             if (!is_error(ret)) {
11624                 if (put_user_u32(ruid, arg1)
11625                     || put_user_u32(euid, arg2)
11626                     || put_user_u32(suid, arg3))
11627                     return -TARGET_EFAULT;
11628             }
11629         }
11630         return ret;
11631 #endif
11632 #ifdef TARGET_NR_setresgid32
11633     case TARGET_NR_setresgid32:
11634         return get_errno(sys_setresgid(arg1, arg2, arg3));
11635 #endif
11636 #ifdef TARGET_NR_getresgid32
11637     case TARGET_NR_getresgid32:
11638         {
11639             gid_t rgid, egid, sgid;
11640             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11641             if (!is_error(ret)) {
11642                 if (put_user_u32(rgid, arg1)
11643                     || put_user_u32(egid, arg2)
11644                     || put_user_u32(sgid, arg3))
11645                     return -TARGET_EFAULT;
11646             }
11647         }
11648         return ret;
11649 #endif
11650 #ifdef TARGET_NR_chown32
11651     case TARGET_NR_chown32:
11652         if (!(p = lock_user_string(arg1)))
11653             return -TARGET_EFAULT;
11654         ret = get_errno(chown(p, arg2, arg3));
11655         unlock_user(p, arg1, 0);
11656         return ret;
11657 #endif
11658 #ifdef TARGET_NR_setuid32
11659     case TARGET_NR_setuid32:
11660         return get_errno(sys_setuid(arg1));
11661 #endif
11662 #ifdef TARGET_NR_setgid32
11663     case TARGET_NR_setgid32:
11664         return get_errno(sys_setgid(arg1));
11665 #endif
11666 #ifdef TARGET_NR_setfsuid32
11667     case TARGET_NR_setfsuid32:
11668         return get_errno(setfsuid(arg1));
11669 #endif
11670 #ifdef TARGET_NR_setfsgid32
11671     case TARGET_NR_setfsgid32:
11672         return get_errno(setfsgid(arg1));
11673 #endif
11674 #ifdef TARGET_NR_mincore
11675     case TARGET_NR_mincore:
11676         {
11677             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11678             if (!a) {
11679                 return -TARGET_ENOMEM;
11680             }
11681             p = lock_user_string(arg3);
11682             if (!p) {
11683                 ret = -TARGET_EFAULT;
11684             } else {
11685                 ret = get_errno(mincore(a, arg2, p));
11686                 unlock_user(p, arg3, ret);
11687             }
11688             unlock_user(a, arg1, 0);
11689         }
11690         return ret;
11691 #endif
11692 #ifdef TARGET_NR_arm_fadvise64_64
11693     case TARGET_NR_arm_fadvise64_64:
11694         /* arm_fadvise64_64 looks like fadvise64_64 but
11695          * with different argument order: fd, advice, offset, len
11696          * rather than the usual fd, offset, len, advice.
11697          * Note that offset and len are both 64-bit so appear as
11698          * pairs of 32-bit registers.
11699          */
11700         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11701                             target_offset64(arg5, arg6), arg2);
11702         return -host_to_target_errno(ret);
11703 #endif
11704 
11705 #if TARGET_ABI_BITS == 32
11706 
11707 #ifdef TARGET_NR_fadvise64_64
11708     case TARGET_NR_fadvise64_64:
11709 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11710         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11711         ret = arg2;
11712         arg2 = arg3;
11713         arg3 = arg4;
11714         arg4 = arg5;
11715         arg5 = arg6;
11716         arg6 = ret;
11717 #else
11718         /* 6 args: fd, offset (high, low), len (high, low), advice */
11719         if (regpairs_aligned(cpu_env, num)) {
11720             /* offset is in (3,4), len in (5,6) and advice in 7 */
11721             arg2 = arg3;
11722             arg3 = arg4;
11723             arg4 = arg5;
11724             arg5 = arg6;
11725             arg6 = arg7;
11726         }
11727 #endif
11728         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11729                             target_offset64(arg4, arg5), arg6);
11730         return -host_to_target_errno(ret);
11731 #endif
11732 
11733 #ifdef TARGET_NR_fadvise64
11734     case TARGET_NR_fadvise64:
11735         /* 5 args: fd, offset (high, low), len, advice */
11736         if (regpairs_aligned(cpu_env, num)) {
11737             /* offset is in (3,4), len in 5 and advice in 6 */
11738             arg2 = arg3;
11739             arg3 = arg4;
11740             arg4 = arg5;
11741             arg5 = arg6;
11742         }
11743         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11744         return -host_to_target_errno(ret);
11745 #endif
11746 
11747 #else /* not a 32-bit ABI */
11748 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11749 #ifdef TARGET_NR_fadvise64_64
11750     case TARGET_NR_fadvise64_64:
11751 #endif
11752 #ifdef TARGET_NR_fadvise64
11753     case TARGET_NR_fadvise64:
11754 #endif
11755 #ifdef TARGET_S390X
11756         switch (arg4) {
11757         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11758         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11759         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11760         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11761         default: break;
11762         }
11763 #endif
11764         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11765 #endif
11766 #endif /* end of 64-bit ABI fadvise handling */
11767 
11768 #ifdef TARGET_NR_madvise
11769     case TARGET_NR_madvise:
11770         /* A straight passthrough may not be safe because qemu sometimes
11771            turns private file-backed mappings into anonymous mappings.
11772            This will break MADV_DONTNEED.
11773            This is a hint, so ignoring and returning success is ok.  */
11774         return 0;
11775 #endif
11776 #ifdef TARGET_NR_fcntl64
11777     case TARGET_NR_fcntl64:
11778     {
11779         int cmd;
11780         struct flock64 fl;
11781         from_flock64_fn *copyfrom = copy_from_user_flock64;
11782         to_flock64_fn *copyto = copy_to_user_flock64;
11783 
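              /*
               * The old ARM OABI aligns 64-bit members on 4-byte boundaries, so
               * its struct flock64 layout differs from EABI and needs dedicated
               * copy-in/copy-out helpers.
               */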
11784 #ifdef TARGET_ARM
11785         if (!((CPUARMState *)cpu_env)->eabi) {
11786             copyfrom = copy_from_user_oabi_flock64;
11787             copyto = copy_to_user_oabi_flock64;
11788         }
11789 #endif
11790 
11791         cmd = target_to_host_fcntl_cmd(arg2);
11792         if (cmd == -TARGET_EINVAL) {
11793             return cmd;
11794         }
11795 
11796         switch(arg2) {
11797         case TARGET_F_GETLK64:
11798             ret = copyfrom(&fl, arg3);
11799             if (ret) {
11800                 break;
11801             }
11802             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11803             if (ret == 0) {
11804                 ret = copyto(arg3, &fl);
11805             }
11806             break;
11807 
11808         case TARGET_F_SETLK64:
11809         case TARGET_F_SETLKW64:
11810             ret = copyfrom(&fl, arg3);
11811             if (ret) {
11812                 break;
11813             }
11814             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11815             break;
11816         default:
11817             ret = do_fcntl(arg1, arg2, arg3);
11818             break;
11819         }
11820         return ret;
11821     }
11822 #endif
11823 #ifdef TARGET_NR_cacheflush
11824     case TARGET_NR_cacheflush:
11825         /* self-modifying code is handled automatically, so nothing needed */
11826         return 0;
11827 #endif
11828 #ifdef TARGET_NR_getpagesize
11829     case TARGET_NR_getpagesize:
11830         return TARGET_PAGE_SIZE;
11831 #endif
11832     case TARGET_NR_gettid:
11833         return get_errno(sys_gettid());
11834 #ifdef TARGET_NR_readahead
11835     case TARGET_NR_readahead:
11836 #if TARGET_ABI_BITS == 32
11837         if (regpairs_aligned(cpu_env, num)) {
11838             arg2 = arg3;
11839             arg3 = arg4;
11840             arg4 = arg5;
11841         }
11842         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11843 #else
11844         ret = get_errno(readahead(arg1, arg2, arg3));
11845 #endif
11846         return ret;
11847 #endif
11848 #ifdef CONFIG_ATTR
11849 #ifdef TARGET_NR_setxattr
11850     case TARGET_NR_listxattr:
11851     case TARGET_NR_llistxattr:
11852     {
11853         void *p, *b = 0;
11854         if (arg2) {
11855             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11856             if (!b) {
11857                 return -TARGET_EFAULT;
11858             }
11859         }
11860         p = lock_user_string(arg1);
11861         if (p) {
11862             if (num == TARGET_NR_listxattr) {
11863                 ret = get_errno(listxattr(p, b, arg3));
11864             } else {
11865                 ret = get_errno(llistxattr(p, b, arg3));
11866             }
11867         } else {
11868             ret = -TARGET_EFAULT;
11869         }
11870         unlock_user(p, arg1, 0);
11871         unlock_user(b, arg2, arg3);
11872         return ret;
11873     }
11874     case TARGET_NR_flistxattr:
11875     {
11876         void *b = 0;
11877         if (arg2) {
11878             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11879             if (!b) {
11880                 return -TARGET_EFAULT;
11881             }
11882         }
11883         ret = get_errno(flistxattr(arg1, b, arg3));
11884         unlock_user(b, arg2, arg3);
11885         return ret;
11886     }
11887     case TARGET_NR_setxattr:
11888     case TARGET_NR_lsetxattr:
11889         {
11890             void *p, *n, *v = 0;
11891             if (arg3) {
11892                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11893                 if (!v) {
11894                     return -TARGET_EFAULT;
11895                 }
11896             }
11897             p = lock_user_string(arg1);
11898             n = lock_user_string(arg2);
11899             if (p && n) {
11900                 if (num == TARGET_NR_setxattr) {
11901                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11902                 } else {
11903                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11904                 }
11905             } else {
11906                 ret = -TARGET_EFAULT;
11907             }
11908             unlock_user(p, arg1, 0);
11909             unlock_user(n, arg2, 0);
11910             unlock_user(v, arg3, 0);
11911         }
11912         return ret;
11913     case TARGET_NR_fsetxattr:
11914         {
11915             void *n, *v = 0;
11916             if (arg3) {
11917                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11918                 if (!v) {
11919                     return -TARGET_EFAULT;
11920                 }
11921             }
11922             n = lock_user_string(arg2);
11923             if (n) {
11924                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11925             } else {
11926                 ret = -TARGET_EFAULT;
11927             }
11928             unlock_user(n, arg2, 0);
11929             unlock_user(v, arg3, 0);
11930         }
11931         return ret;
11932     case TARGET_NR_getxattr:
11933     case TARGET_NR_lgetxattr:
11934         {
11935             void *p, *n, *v = 0;
11936             if (arg3) {
11937                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11938                 if (!v) {
11939                     return -TARGET_EFAULT;
11940                 }
11941             }
11942             p = lock_user_string(arg1);
11943             n = lock_user_string(arg2);
11944             if (p && n) {
11945                 if (num == TARGET_NR_getxattr) {
11946                     ret = get_errno(getxattr(p, n, v, arg4));
11947                 } else {
11948                     ret = get_errno(lgetxattr(p, n, v, arg4));
11949                 }
11950             } else {
11951                 ret = -TARGET_EFAULT;
11952             }
11953             unlock_user(p, arg1, 0);
11954             unlock_user(n, arg2, 0);
11955             unlock_user(v, arg3, arg4);
11956         }
11957         return ret;
11958     case TARGET_NR_fgetxattr:
11959         {
11960             void *n, *v = 0;
11961             if (arg3) {
11962                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11963                 if (!v) {
11964                     return -TARGET_EFAULT;
11965                 }
11966             }
11967             n = lock_user_string(arg2);
11968             if (n) {
11969                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11970             } else {
11971                 ret = -TARGET_EFAULT;
11972             }
11973             unlock_user(n, arg2, 0);
11974             unlock_user(v, arg3, arg4);
11975         }
11976         return ret;
11977     case TARGET_NR_removexattr:
11978     case TARGET_NR_lremovexattr:
11979         {
11980             void *p, *n;
11981             p = lock_user_string(arg1);
11982             n = lock_user_string(arg2);
11983             if (p && n) {
11984                 if (num == TARGET_NR_removexattr) {
11985                     ret = get_errno(removexattr(p, n));
11986                 } else {
11987                     ret = get_errno(lremovexattr(p, n));
11988                 }
11989             } else {
11990                 ret = -TARGET_EFAULT;
11991             }
11992             unlock_user(p, arg1, 0);
11993             unlock_user(n, arg2, 0);
11994         }
11995         return ret;
11996     case TARGET_NR_fremovexattr:
11997         {
11998             void *n;
11999             n = lock_user_string(arg2);
12000             if (n) {
12001                 ret = get_errno(fremovexattr(arg1, n));
12002             } else {
12003                 ret = -TARGET_EFAULT;
12004             }
12005             unlock_user(n, arg2, 0);
12006         }
12007         return ret;
12008 #endif
12009 #endif /* CONFIG_ATTR */
12010 #ifdef TARGET_NR_set_thread_area
12011     case TARGET_NR_set_thread_area:
12012 #if defined(TARGET_MIPS)
12013       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12014       return 0;
12015 #elif defined(TARGET_CRIS)
12016       if (arg1 & 0xff)
12017           ret = -TARGET_EINVAL;
12018       else {
12019           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12020           ret = 0;
12021       }
12022       return ret;
12023 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12024       return do_set_thread_area(cpu_env, arg1);
12025 #elif defined(TARGET_M68K)
12026       {
12027           TaskState *ts = cpu->opaque;
12028           ts->tp_value = arg1;
12029           return 0;
12030       }
12031 #else
12032       return -TARGET_ENOSYS;
12033 #endif
12034 #endif
12035 #ifdef TARGET_NR_get_thread_area
12036     case TARGET_NR_get_thread_area:
12037 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12038         return do_get_thread_area(cpu_env, arg1);
12039 #elif defined(TARGET_M68K)
12040         {
12041             TaskState *ts = cpu->opaque;
12042             return ts->tp_value;
12043         }
12044 #else
12045         return -TARGET_ENOSYS;
12046 #endif
12047 #endif
12048 #ifdef TARGET_NR_getdomainname
12049     case TARGET_NR_getdomainname:
12050         return -TARGET_ENOSYS;
12051 #endif
12052 
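    /*
     * The clock_* syscalls convert the timespec between guest and host
     * layouts around a direct call to the host libc function; the *_time64
     * variants differ only in using the 64-bit guest timespec layout.
     */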
12053 #ifdef TARGET_NR_clock_settime
12054     case TARGET_NR_clock_settime:
12055     {
12056         struct timespec ts;
12057 
12058         ret = target_to_host_timespec(&ts, arg2);
12059         if (!is_error(ret)) {
12060             ret = get_errno(clock_settime(arg1, &ts));
12061         }
12062         return ret;
12063     }
12064 #endif
12065 #ifdef TARGET_NR_clock_settime64
12066     case TARGET_NR_clock_settime64:
12067     {
12068         struct timespec ts;
12069 
12070         ret = target_to_host_timespec64(&ts, arg2);
12071         if (!is_error(ret)) {
12072             ret = get_errno(clock_settime(arg1, &ts));
12073         }
12074         return ret;
12075     }
12076 #endif
12077 #ifdef TARGET_NR_clock_gettime
12078     case TARGET_NR_clock_gettime:
12079     {
12080         struct timespec ts;
12081         ret = get_errno(clock_gettime(arg1, &ts));
12082         if (!is_error(ret)) {
12083             ret = host_to_target_timespec(arg2, &ts);
12084         }
12085         return ret;
12086     }
12087 #endif
12088 #ifdef TARGET_NR_clock_gettime64
12089     case TARGET_NR_clock_gettime64:
12090     {
12091         struct timespec ts;
12092         ret = get_errno(clock_gettime(arg1, &ts));
12093         if (!is_error(ret)) {
12094             ret = host_to_target_timespec64(arg2, &ts);
12095         }
12096         return ret;
12097     }
12098 #endif
12099 #ifdef TARGET_NR_clock_getres
12100     case TARGET_NR_clock_getres:
12101     {
12102         struct timespec ts;
12103         ret = get_errno(clock_getres(arg1, &ts));
12104         if (!is_error(ret)) {
12105             host_to_target_timespec(arg2, &ts);
12106         }
12107         return ret;
12108     }
12109 #endif
12110 #ifdef TARGET_NR_clock_getres_time64
12111     case TARGET_NR_clock_getres_time64:
12112     {
12113         struct timespec ts;
12114         ret = get_errno(clock_getres(arg1, &ts));
12115         if (!is_error(ret)) {
12116             host_to_target_timespec64(arg2, &ts);
12117         }
12118         return ret;
12119     }
12120 #endif
12121 #ifdef TARGET_NR_clock_nanosleep
12122     case TARGET_NR_clock_nanosleep:
12123     {
12124         struct timespec ts;
12125         if (target_to_host_timespec(&ts, arg3)) {
12126             return -TARGET_EFAULT;
12127         }
12128         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12129                                              &ts, arg4 ? &ts : NULL));
12130         /*
12131          * If the call is interrupted by a signal handler, it fails with
12132          * -TARGET_EINTR.  In that case, if arg4 is not NULL and arg2 is
12133          * not TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12134          */
12135         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12136             host_to_target_timespec(arg4, &ts)) {
12137               return -TARGET_EFAULT;
12138         }
12139 
12140         return ret;
12141     }
12142 #endif
12143 #ifdef TARGET_NR_clock_nanosleep_time64
12144     case TARGET_NR_clock_nanosleep_time64:
12145     {
12146         struct timespec ts;
12147 
12148         if (target_to_host_timespec64(&ts, arg3)) {
12149             return -TARGET_EFAULT;
12150         }
12151 
12152         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12153                                              &ts, arg4 ? &ts : NULL));
12154 
12155         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12156             host_to_target_timespec64(arg4, &ts)) {
12157             return -TARGET_EFAULT;
12158         }
12159         return ret;
12160     }
12161 #endif
12162 
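    /*
     * set_tid_address: the kernel merely records this pointer and writes to
     * it when the thread exits, so the guest address is translated once
     * with g2h() and passed straight through to the host syscall.
     */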
12163 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12164     case TARGET_NR_set_tid_address:
12165         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12166 #endif
12167 
12168     case TARGET_NR_tkill:
12169         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12170 
12171     case TARGET_NR_tgkill:
12172         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12173                          target_to_host_signal(arg3)));
12174 
12175 #ifdef TARGET_NR_set_robust_list
12176     case TARGET_NR_set_robust_list:
12177     case TARGET_NR_get_robust_list:
12178         /* The ABI for supporting robust futexes has userspace pass
12179          * the kernel a pointer to a linked list which is updated by
12180          * userspace after the syscall; the list is walked by the kernel
12181          * when the thread exits. Since the linked list in QEMU guest
12182          * memory isn't a valid linked list for the host and we have
12183          * no way to reliably intercept the thread-death event, we can't
12184          * support these. Silently return ENOSYS so that guest userspace
12185          * falls back to a non-robust futex implementation (which should
12186          * be OK except in the corner case of the guest crashing while
12187          * holding a mutex that is shared with another process via
12188          * shared memory).
12189          */
12190         return -TARGET_ENOSYS;
12191 #endif
12192 
12193 #if defined(TARGET_NR_utimensat)
12194     case TARGET_NR_utimensat:
12195         {
12196             struct timespec *tsp, ts[2];
12197             if (!arg3) {
12198                 tsp = NULL;
12199             } else {
12200                 if (target_to_host_timespec(ts, arg3)) {
12201                     return -TARGET_EFAULT;
12202                 }
12203                 if (target_to_host_timespec(ts + 1, arg3 +
12204                                             sizeof(struct target_timespec))) {
12205                     return -TARGET_EFAULT;
12206                 }
12207                 tsp = ts;
12208             }
12209             if (!arg2)
12210                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12211             else {
12212                 if (!(p = lock_user_string(arg2))) {
12213                     return -TARGET_EFAULT;
12214                 }
12215                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12216                 unlock_user(p, arg2, 0);
12217             }
12218         }
12219         return ret;
12220 #endif
12221 #ifdef TARGET_NR_utimensat_time64
12222     case TARGET_NR_utimensat_time64:
12223         {
12224             struct timespec *tsp, ts[2];
12225             if (!arg3) {
12226                 tsp = NULL;
12227             } else {
12228                 if (target_to_host_timespec64(ts, arg3)) {
12229                     return -TARGET_EFAULT;
12230                 }
12231                 if (target_to_host_timespec64(ts + 1, arg3 +
12232                                      sizeof(struct target__kernel_timespec))) {
12233                     return -TARGET_EFAULT;
12234                 }
12235                 tsp = ts;
12236             }
12237             if (!arg2)
12238                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12239             else {
12240                 p = lock_user_string(arg2);
12241                 if (!p) {
12242                     return -TARGET_EFAULT;
12243                 }
12244                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12245                 unlock_user(p, arg2, 0);
12246             }
12247         }
12248         return ret;
12249 #endif
12250 #ifdef TARGET_NR_futex
12251     case TARGET_NR_futex:
12252         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12253 #endif
12254 #ifdef TARGET_NR_futex_time64
12255     case TARGET_NR_futex_time64:
12256         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12257 #endif
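    /*
     * inotify file descriptors get an fd_trans handler registered so that
     * the events later read from them can be converted to the target layout.
     */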
12258 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12259     case TARGET_NR_inotify_init:
12260         ret = get_errno(sys_inotify_init());
12261         if (ret >= 0) {
12262             fd_trans_register(ret, &target_inotify_trans);
12263         }
12264         return ret;
12265 #endif
12266 #ifdef CONFIG_INOTIFY1
12267 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12268     case TARGET_NR_inotify_init1:
12269         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12270                                           fcntl_flags_tbl)));
12271         if (ret >= 0) {
12272             fd_trans_register(ret, &target_inotify_trans);
12273         }
12274         return ret;
12275 #endif
12276 #endif
12277 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12278     case TARGET_NR_inotify_add_watch:
12279         p = lock_user_string(arg2);
12280         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12281         unlock_user(p, arg2, 0);
12282         return ret;
12283 #endif
12284 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12285     case TARGET_NR_inotify_rm_watch:
12286         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12287 #endif
12288 
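    /*
     * POSIX message queues: mq_attr structures are converted with
     * copy_from_user_mq_attr()/copy_to_user_mq_attr(), the queue name is
     * locked from guest memory, and the host mq_* functions are called
     * directly.
     */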
12289 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12290     case TARGET_NR_mq_open:
12291         {
12292             struct mq_attr posix_mq_attr;
12293             struct mq_attr *pposix_mq_attr;
12294             int host_flags;
12295 
12296             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12297             pposix_mq_attr = NULL;
12298             if (arg4) {
12299                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12300                     return -TARGET_EFAULT;
12301                 }
12302                 pposix_mq_attr = &posix_mq_attr;
12303             }
12304             p = lock_user_string(arg1 - 1);
12305             if (!p) {
12306                 return -TARGET_EFAULT;
12307             }
12308             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12309             unlock_user(p, arg1, 0);
12310         }
12311         return ret;
12312 
12313     case TARGET_NR_mq_unlink:
12314         p = lock_user_string(arg1 - 1);
12315         if (!p) {
12316             return -TARGET_EFAULT;
12317         }
12318         ret = get_errno(mq_unlink(p));
12319         unlock_user(p, arg1, 0);
12320         return ret;
12321 
12322 #ifdef TARGET_NR_mq_timedsend
12323     case TARGET_NR_mq_timedsend:
12324         {
12325             struct timespec ts;
12326 
12327             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12328             if (arg5 != 0) {
12329                 if (target_to_host_timespec(&ts, arg5)) {
12330                     return -TARGET_EFAULT;
12331                 }
12332                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12333                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12334                     return -TARGET_EFAULT;
12335                 }
12336             } else {
12337                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12338             }
12339             unlock_user(p, arg2, arg3);
12340         }
12341         return ret;
12342 #endif
12343 #ifdef TARGET_NR_mq_timedsend_time64
12344     case TARGET_NR_mq_timedsend_time64:
12345         {
12346             struct timespec ts;
12347 
12348             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12349             if (arg5 != 0) {
12350                 if (target_to_host_timespec64(&ts, arg5)) {
12351                     return -TARGET_EFAULT;
12352                 }
12353                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12354                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12355                     return -TARGET_EFAULT;
12356                 }
12357             } else {
12358                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12359             }
12360             unlock_user(p, arg2, arg3);
12361         }
12362         return ret;
12363 #endif
12364 
12365 #ifdef TARGET_NR_mq_timedreceive
12366     case TARGET_NR_mq_timedreceive:
12367         {
12368             struct timespec ts;
12369             unsigned int prio;
12370 
12371             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12372             if (arg5 != 0) {
12373                 if (target_to_host_timespec(&ts, arg5)) {
12374                     return -TARGET_EFAULT;
12375                 }
12376                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12377                                                      &prio, &ts));
12378                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12379                     return -TARGET_EFAULT;
12380                 }
12381             } else {
12382                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12383                                                      &prio, NULL));
12384             }
12385             unlock_user(p, arg2, arg3);
12386             if (arg4 != 0)
12387                 put_user_u32(prio, arg4);
12388         }
12389         return ret;
12390 #endif
12391 #ifdef TARGET_NR_mq_timedreceive_time64
12392     case TARGET_NR_mq_timedreceive_time64:
12393         {
12394             struct timespec ts;
12395             unsigned int prio;
12396 
12397             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12398             if (arg5 != 0) {
12399                 if (target_to_host_timespec64(&ts, arg5)) {
12400                     return -TARGET_EFAULT;
12401                 }
12402                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12403                                                      &prio, &ts));
12404                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12405                     return -TARGET_EFAULT;
12406                 }
12407             } else {
12408                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12409                                                      &prio, NULL));
12410             }
12411             unlock_user(p, arg2, arg3);
12412             if (arg4 != 0) {
12413                 put_user_u32(prio, arg4);
12414             }
12415         }
12416         return ret;
12417 #endif
12418 
12419     /* Not implemented for now... */
12420 /*     case TARGET_NR_mq_notify: */
12421 /*         break; */
12422 
12423     case TARGET_NR_mq_getsetattr:
12424         {
12425             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12426             ret = 0;
12427             if (arg2 != 0) {
12428                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12429                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12430                                            &posix_mq_attr_out));
12431             } else if (arg3 != 0) {
12432                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12433             }
12434             if (ret == 0 && arg3 != 0) {
12435                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12436             }
12437         }
12438         return ret;
12439 #endif
12440 
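    /*
     * For splice, any guest loff_t offset pointers are read with
     * get_user_u64() before the call and written back afterwards; vmsplice
     * locks the guest iovec with lock_iovec().  tee needs no conversion.
     */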
12441 #ifdef CONFIG_SPLICE
12442 #ifdef TARGET_NR_tee
12443     case TARGET_NR_tee:
12444         {
12445             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12446         }
12447         return ret;
12448 #endif
12449 #ifdef TARGET_NR_splice
12450     case TARGET_NR_splice:
12451         {
12452             loff_t loff_in, loff_out;
12453             loff_t *ploff_in = NULL, *ploff_out = NULL;
12454             if (arg2) {
12455                 if (get_user_u64(loff_in, arg2)) {
12456                     return -TARGET_EFAULT;
12457                 }
12458                 ploff_in = &loff_in;
12459             }
12460             if (arg4) {
12461                 if (get_user_u64(loff_out, arg4)) {
12462                     return -TARGET_EFAULT;
12463                 }
12464                 ploff_out = &loff_out;
12465             }
12466             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12467             if (arg2) {
12468                 if (put_user_u64(loff_in, arg2)) {
12469                     return -TARGET_EFAULT;
12470                 }
12471             }
12472             if (arg4) {
12473                 if (put_user_u64(loff_out, arg4)) {
12474                     return -TARGET_EFAULT;
12475                 }
12476             }
12477         }
12478         return ret;
12479 #endif
12480 #ifdef TARGET_NR_vmsplice
12481     case TARGET_NR_vmsplice:
12482         {
12483             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12484             if (vec != NULL) {
12485                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12486                 unlock_iovec(vec, arg2, arg3, 0);
12487             } else {
12488                 ret = -host_to_target_errno(errno);
12489             }
12490         }
12491         return ret;
12492 #endif
12493 #endif /* CONFIG_SPLICE */
12494 #ifdef CONFIG_EVENTFD
12495 #if defined(TARGET_NR_eventfd)
12496     case TARGET_NR_eventfd:
12497         ret = get_errno(eventfd(arg1, 0));
12498         if (ret >= 0) {
12499             fd_trans_register(ret, &target_eventfd_trans);
12500         }
12501         return ret;
12502 #endif
12503 #if defined(TARGET_NR_eventfd2)
12504     case TARGET_NR_eventfd2:
12505     {
12506         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12507         if (arg2 & TARGET_O_NONBLOCK) {
12508             host_flags |= O_NONBLOCK;
12509         }
12510         if (arg2 & TARGET_O_CLOEXEC) {
12511             host_flags |= O_CLOEXEC;
12512         }
12513         ret = get_errno(eventfd(arg1, host_flags));
12514         if (ret >= 0) {
12515             fd_trans_register(ret, &target_eventfd_trans);
12516         }
12517         return ret;
12518     }
12519 #endif
12520 #endif /* CONFIG_EVENTFD  */
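    /*
     * fallocate and sync_file_range take 64-bit file offsets; on 32-bit
     * ABIs these arrive split across two registers and are reassembled
     * with target_offset64().  MIPS and the Arm sync_file_range2 flavour
     * use different argument orderings, handled below.
     */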
12521 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12522     case TARGET_NR_fallocate:
12523 #if TARGET_ABI_BITS == 32
12524         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12525                                   target_offset64(arg5, arg6)));
12526 #else
12527         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12528 #endif
12529         return ret;
12530 #endif
12531 #if defined(CONFIG_SYNC_FILE_RANGE)
12532 #if defined(TARGET_NR_sync_file_range)
12533     case TARGET_NR_sync_file_range:
12534 #if TARGET_ABI_BITS == 32
12535 #if defined(TARGET_MIPS)
12536         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12537                                         target_offset64(arg5, arg6), arg7));
12538 #else
12539         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12540                                         target_offset64(arg4, arg5), arg6));
12541 #endif /* !TARGET_MIPS */
12542 #else
12543         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12544 #endif
12545         return ret;
12546 #endif
12547 #if defined(TARGET_NR_sync_file_range2) || \
12548     defined(TARGET_NR_arm_sync_file_range)
12549 #if defined(TARGET_NR_sync_file_range2)
12550     case TARGET_NR_sync_file_range2:
12551 #endif
12552 #if defined(TARGET_NR_arm_sync_file_range)
12553     case TARGET_NR_arm_sync_file_range:
12554 #endif
12555         /* This is like sync_file_range but the arguments are reordered */
12556 #if TARGET_ABI_BITS == 32
12557         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12558                                         target_offset64(arg5, arg6), arg2));
12559 #else
12560         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12561 #endif
12562         return ret;
12563 #endif
12564 #endif
12565 #if defined(TARGET_NR_signalfd4)
12566     case TARGET_NR_signalfd4:
12567         return do_signalfd4(arg1, arg2, arg4);
12568 #endif
12569 #if defined(TARGET_NR_signalfd)
12570     case TARGET_NR_signalfd:
12571         return do_signalfd4(arg1, arg2, 0);
12572 #endif
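    /*
     * epoll: epoll_event structures are converted between guest and host
     * layouts with tswap32()/tswap64(); epoll_wait and epoll_pwait share a
     * single implementation built on safe_epoll_pwait().
     */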
12573 #if defined(CONFIG_EPOLL)
12574 #if defined(TARGET_NR_epoll_create)
12575     case TARGET_NR_epoll_create:
12576         return get_errno(epoll_create(arg1));
12577 #endif
12578 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12579     case TARGET_NR_epoll_create1:
12580         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12581 #endif
12582 #if defined(TARGET_NR_epoll_ctl)
12583     case TARGET_NR_epoll_ctl:
12584     {
12585         struct epoll_event ep;
12586         struct epoll_event *epp = 0;
12587         if (arg4) {
12588             if (arg2 != EPOLL_CTL_DEL) {
12589                 struct target_epoll_event *target_ep;
12590                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12591                     return -TARGET_EFAULT;
12592                 }
12593                 ep.events = tswap32(target_ep->events);
12594                 /*
12595                  * The epoll_data_t union is just opaque data to the kernel,
12596                  * so we transfer all 64 bits across and need not worry what
12597                  * actual data type it is.
12598                  */
12599                 ep.data.u64 = tswap64(target_ep->data.u64);
12600                 unlock_user_struct(target_ep, arg4, 0);
12601             }
12602             /*
12603              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12604              * a non-NULL pointer, even though this argument is ignored
12605              * by the kernel.
12606              */
12607             epp = &ep;
12608         }
12609         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12610     }
12611 #endif
12612 
12613 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12614 #if defined(TARGET_NR_epoll_wait)
12615     case TARGET_NR_epoll_wait:
12616 #endif
12617 #if defined(TARGET_NR_epoll_pwait)
12618     case TARGET_NR_epoll_pwait:
12619 #endif
12620     {
12621         struct target_epoll_event *target_ep;
12622         struct epoll_event *ep;
12623         int epfd = arg1;
12624         int maxevents = arg3;
12625         int timeout = arg4;
12626 
12627         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12628             return -TARGET_EINVAL;
12629         }
12630 
12631         target_ep = lock_user(VERIFY_WRITE, arg2,
12632                               maxevents * sizeof(struct target_epoll_event), 1);
12633         if (!target_ep) {
12634             return -TARGET_EFAULT;
12635         }
12636 
12637         ep = g_try_new(struct epoll_event, maxevents);
12638         if (!ep) {
12639             unlock_user(target_ep, arg2, 0);
12640             return -TARGET_ENOMEM;
12641         }
12642 
12643         switch (num) {
12644 #if defined(TARGET_NR_epoll_pwait)
12645         case TARGET_NR_epoll_pwait:
12646         {
12647             target_sigset_t *target_set;
12648             sigset_t _set, *set = &_set;
12649 
12650             if (arg5) {
12651                 if (arg6 != sizeof(target_sigset_t)) {
12652                     ret = -TARGET_EINVAL;
12653                     break;
12654                 }
12655 
12656                 target_set = lock_user(VERIFY_READ, arg5,
12657                                        sizeof(target_sigset_t), 1);
12658                 if (!target_set) {
12659                     ret = -TARGET_EFAULT;
12660                     break;
12661                 }
12662                 target_to_host_sigset(set, target_set);
12663                 unlock_user(target_set, arg5, 0);
12664             } else {
12665                 set = NULL;
12666             }
12667 
12668             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12669                                              set, SIGSET_T_SIZE));
12670             break;
12671         }
12672 #endif
12673 #if defined(TARGET_NR_epoll_wait)
12674         case TARGET_NR_epoll_wait:
12675             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12676                                              NULL, 0));
12677             break;
12678 #endif
12679         default:
12680             ret = -TARGET_ENOSYS;
12681         }
12682         if (!is_error(ret)) {
12683             int i;
12684             for (i = 0; i < ret; i++) {
12685                 target_ep[i].events = tswap32(ep[i].events);
12686                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12687             }
12688             unlock_user(target_ep, arg2,
12689                         ret * sizeof(struct target_epoll_event));
12690         } else {
12691             unlock_user(target_ep, arg2, 0);
12692         }
12693         g_free(ep);
12694         return ret;
12695     }
12696 #endif
12697 #endif
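    /*
     * prlimit64: a new limit for RLIMIT_AS, RLIMIT_DATA or RLIMIT_STACK is
     * not forwarded to the host (see the resource check below); for those
     * resources only the old limit is read back and the new value is
     * silently ignored.
     */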
12698 #ifdef TARGET_NR_prlimit64
12699     case TARGET_NR_prlimit64:
12700     {
12701         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12702         struct target_rlimit64 *target_rnew, *target_rold;
12703         struct host_rlimit64 rnew, rold, *rnewp = 0;
12704         int resource = target_to_host_resource(arg2);
12705 
12706         if (arg3 && (resource != RLIMIT_AS &&
12707                      resource != RLIMIT_DATA &&
12708                      resource != RLIMIT_STACK)) {
12709             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12710                 return -TARGET_EFAULT;
12711             }
12712             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12713             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12714             unlock_user_struct(target_rnew, arg3, 0);
12715             rnewp = &rnew;
12716         }
12717 
12718         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12719         if (!is_error(ret) && arg4) {
12720             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12721                 return -TARGET_EFAULT;
12722             }
12723             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12724             target_rold->rlim_max = tswap64(rold.rlim_max);
12725             unlock_user_struct(target_rold, arg4, 1);
12726         }
12727         return ret;
12728     }
12729 #endif
12730 #ifdef TARGET_NR_gethostname
12731     case TARGET_NR_gethostname:
12732     {
12733         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12734         if (name) {
12735             ret = get_errno(gethostname(name, arg2));
12736             unlock_user(name, arg1, arg2);
12737         } else {
12738             ret = -TARGET_EFAULT;
12739         }
12740         return ret;
12741     }
12742 #endif
12743 #ifdef TARGET_NR_atomic_cmpxchg_32
12744     case TARGET_NR_atomic_cmpxchg_32:
12745     {
12746         /* should use start_exclusive from main.c */
12747         abi_ulong mem_value;
12748         if (get_user_u32(mem_value, arg6)) {
12749             target_siginfo_t info;
12750             info.si_signo = SIGSEGV;
12751             info.si_errno = 0;
12752             info.si_code = TARGET_SEGV_MAPERR;
12753             info._sifields._sigfault._addr = arg6;
12754             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12755                          QEMU_SI_FAULT, &info);
12756             ret = 0xdeadbeef;
12757 
12758         }
12759         if (mem_value == arg2)
12760             put_user_u32(arg1, arg6);
12761         return mem_value;
12762     }
12763 #endif
12764 #ifdef TARGET_NR_atomic_barrier
12765     case TARGET_NR_atomic_barrier:
12766         /* Like the kernel implementation and the
12767            QEMU Arm barrier, this is a no-op. */
12768         return 0;
12769 #endif
12770 
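    /*
     * POSIX timers: host timer_t handles live in the g_posix_timers table.
     * The guest is handed the table index tagged with TIMER_MAGIC, and
     * get_timer_id() recovers the index (or a negative error) from that
     * value on the way back in.
     */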
12771 #ifdef TARGET_NR_timer_create
12772     case TARGET_NR_timer_create:
12773     {
12774         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12775 
12776         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12777 
12778         int clkid = arg1;
12779         int timer_index = next_free_host_timer();
12780 
12781         if (timer_index < 0) {
12782             ret = -TARGET_EAGAIN;
12783         } else {
12784             timer_t *phtimer = g_posix_timers  + timer_index;
12785             timer_t *phtimer = g_posix_timers + timer_index;
12786             if (arg2) {
12787                 phost_sevp = &host_sevp;
12788                 ret = target_to_host_sigevent(phost_sevp, arg2);
12789                 if (ret != 0) {
12790                     return ret;
12791                 }
12792             }
12793 
12794             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12795             if (ret) {
12796                 phtimer = NULL;
12797             } else {
12798                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12799                     return -TARGET_EFAULT;
12800                 }
12801             }
12802         }
12803         return ret;
12804     }
12805 #endif
12806 
12807 #ifdef TARGET_NR_timer_settime
12808     case TARGET_NR_timer_settime:
12809     {
12810         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12811          * struct itimerspec * old_value */
12812         target_timer_t timerid = get_timer_id(arg1);
12813 
12814         if (timerid < 0) {
12815             ret = timerid;
12816         } else if (arg3 == 0) {
12817             ret = -TARGET_EINVAL;
12818         } else {
12819             timer_t htimer = g_posix_timers[timerid];
12820             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12821 
12822             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12823                 return -TARGET_EFAULT;
12824             }
12825             ret = get_errno(
12826                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12827             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12828                 return -TARGET_EFAULT;
12829             }
12830         }
12831         return ret;
12832     }
12833 #endif
12834 
12835 #ifdef TARGET_NR_timer_settime64
12836     case TARGET_NR_timer_settime64:
12837     {
12838         target_timer_t timerid = get_timer_id(arg1);
12839 
12840         if (timerid < 0) {
12841             ret = timerid;
12842         } else if (arg3 == 0) {
12843             ret = -TARGET_EINVAL;
12844         } else {
12845             timer_t htimer = g_posix_timers[timerid];
12846             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12847 
12848             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12849                 return -TARGET_EFAULT;
12850             }
12851             ret = get_errno(
12852                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12853             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12854                 return -TARGET_EFAULT;
12855             }
12856         }
12857         return ret;
12858     }
12859 #endif
12860 
12861 #ifdef TARGET_NR_timer_gettime
12862     case TARGET_NR_timer_gettime:
12863     {
12864         /* args: timer_t timerid, struct itimerspec *curr_value */
12865         target_timer_t timerid = get_timer_id(arg1);
12866 
12867         if (timerid < 0) {
12868             ret = timerid;
12869         } else if (!arg2) {
12870             ret = -TARGET_EFAULT;
12871         } else {
12872             timer_t htimer = g_posix_timers[timerid];
12873             struct itimerspec hspec;
12874             ret = get_errno(timer_gettime(htimer, &hspec));
12875 
12876             if (host_to_target_itimerspec(arg2, &hspec)) {
12877                 ret = -TARGET_EFAULT;
12878             }
12879         }
12880         return ret;
12881     }
12882 #endif
12883 
12884 #ifdef TARGET_NR_timer_gettime64
12885     case TARGET_NR_timer_gettime64:
12886     {
12887         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12888         target_timer_t timerid = get_timer_id(arg1);
12889 
12890         if (timerid < 0) {
12891             ret = timerid;
12892         } else if (!arg2) {
12893             ret = -TARGET_EFAULT;
12894         } else {
12895             timer_t htimer = g_posix_timers[timerid];
12896             struct itimerspec hspec;
12897             ret = get_errno(timer_gettime(htimer, &hspec));
12898 
12899             if (host_to_target_itimerspec64(arg2, &hspec)) {
12900                 ret = -TARGET_EFAULT;
12901             }
12902         }
12903         return ret;
12904     }
12905 #endif
12906 
12907 #ifdef TARGET_NR_timer_getoverrun
12908     case TARGET_NR_timer_getoverrun:
12909     {
12910         /* args: timer_t timerid */
12911         target_timer_t timerid = get_timer_id(arg1);
12912 
12913         if (timerid < 0) {
12914             ret = timerid;
12915         } else {
12916             timer_t htimer = g_posix_timers[timerid];
12917             ret = get_errno(timer_getoverrun(htimer));
12918         }
12919         return ret;
12920     }
12921 #endif
12922 
12923 #ifdef TARGET_NR_timer_delete
12924     case TARGET_NR_timer_delete:
12925     {
12926         /* args: timer_t timerid */
12927         target_timer_t timerid = get_timer_id(arg1);
12928 
12929         if (timerid < 0) {
12930             ret = timerid;
12931         } else {
12932             timer_t htimer = g_posix_timers[timerid];
12933             ret = get_errno(timer_delete(htimer));
12934             g_posix_timers[timerid] = 0;
12935         }
12936         return ret;
12937     }
12938 #endif
12939 
12940 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12941     case TARGET_NR_timerfd_create:
12942         return get_errno(timerfd_create(arg1,
12943                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12944 #endif
12945 
12946 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12947     case TARGET_NR_timerfd_gettime:
12948         {
12949             struct itimerspec its_curr;
12950 
12951             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12952 
12953             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12954                 return -TARGET_EFAULT;
12955             }
12956         }
12957         return ret;
12958 #endif
12959 
12960 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12961     case TARGET_NR_timerfd_gettime64:
12962         {
12963             struct itimerspec its_curr;
12964 
12965             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12966 
12967             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12968                 return -TARGET_EFAULT;
12969             }
12970         }
12971         return ret;
12972 #endif
12973 
12974 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12975     case TARGET_NR_timerfd_settime:
12976         {
12977             struct itimerspec its_new, its_old, *p_new;
12978 
12979             if (arg3) {
12980                 if (target_to_host_itimerspec(&its_new, arg3)) {
12981                     return -TARGET_EFAULT;
12982                 }
12983                 p_new = &its_new;
12984             } else {
12985                 p_new = NULL;
12986             }
12987 
12988             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12989 
12990             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12991                 return -TARGET_EFAULT;
12992             }
12993         }
12994         return ret;
12995 #endif
12996 
12997 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12998     case TARGET_NR_timerfd_settime64:
12999         {
13000             struct itimerspec its_new, its_old, *p_new;
13001 
13002             if (arg3) {
13003                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13004                     return -TARGET_EFAULT;
13005                 }
13006                 p_new = &its_new;
13007             } else {
13008                 p_new = NULL;
13009             }
13010 
13011             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13012 
13013             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13014                 return -TARGET_EFAULT;
13015             }
13016         }
13017         return ret;
13018 #endif
13019 
13020 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13021     case TARGET_NR_ioprio_get:
13022         return get_errno(ioprio_get(arg1, arg2));
13023 #endif
13024 
13025 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13026     case TARGET_NR_ioprio_set:
13027         return get_errno(ioprio_set(arg1, arg2, arg3));
13028 #endif
13029 
13030 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13031     case TARGET_NR_setns:
13032         return get_errno(setns(arg1, arg2));
13033 #endif
13034 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13035     case TARGET_NR_unshare:
13036         return get_errno(unshare(arg1));
13037 #endif
13038 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13039     case TARGET_NR_kcmp:
13040         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13041 #endif
13042 #ifdef TARGET_NR_swapcontext
13043     case TARGET_NR_swapcontext:
13044         /* PowerPC specific.  */
13045         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13046 #endif
13047 #ifdef TARGET_NR_memfd_create
13048     case TARGET_NR_memfd_create:
13049         p = lock_user_string(arg1);
13050         if (!p) {
13051             return -TARGET_EFAULT;
13052         }
13053         ret = get_errno(memfd_create(p, arg2));
13054         fd_trans_unregister(ret);
13055         unlock_user(p, arg1, 0);
13056         return ret;
13057 #endif
13058 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13059     case TARGET_NR_membarrier:
13060         return get_errno(membarrier(arg1, arg2));
13061 #endif
13062 
13063 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13064     case TARGET_NR_copy_file_range:
13065         {
13066             loff_t inoff, outoff;
13067             loff_t *pinoff = NULL, *poutoff = NULL;
13068 
13069             if (arg2) {
13070                 if (get_user_u64(inoff, arg2)) {
13071                     return -TARGET_EFAULT;
13072                 }
13073                 pinoff = &inoff;
13074             }
13075             if (arg4) {
13076                 if (get_user_u64(outoff, arg4)) {
13077                     return -TARGET_EFAULT;
13078                 }
13079                 poutoff = &outoff;
13080             }
13081             /* Do not sign-extend the count parameter. */
13082             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13083                                                  (abi_ulong)arg5, arg6));
13084             if (!is_error(ret) && ret > 0) {
13085                 if (arg2) {
13086                     if (put_user_u64(inoff, arg2)) {
13087                         return -TARGET_EFAULT;
13088                     }
13089                 }
13090                 if (arg4) {
13091                     if (put_user_u64(outoff, arg4)) {
13092                         return -TARGET_EFAULT;
13093                     }
13094                 }
13095             }
13096         }
13097         return ret;
13098 #endif
13099 
13100 #if defined(TARGET_NR_pivot_root)
13101     case TARGET_NR_pivot_root:
13102         {
13103             void *p2;
13104             p = lock_user_string(arg1); /* new_root */
13105             p2 = lock_user_string(arg2); /* put_old */
13106             if (!p || !p2) {
13107                 ret = -TARGET_EFAULT;
13108             } else {
13109                 ret = get_errno(pivot_root(p, p2));
13110             }
13111             unlock_user(p2, arg2, 0);
13112             unlock_user(p, arg1, 0);
13113         }
13114         return ret;
13115 #endif
13116 
13117     default:
13118         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13119         return -TARGET_ENOSYS;
13120     }
13121     return ret;
13122 }
13123 
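/*
 * do_syscall() is the entry point used by the per-architecture cpu main
 * loops: it records the syscall for QEMU plugins, emits -strace logging
 * around the call, and delegates the actual work to do_syscall1() above.
 */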
13124 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13125                     abi_long arg2, abi_long arg3, abi_long arg4,
13126                     abi_long arg5, abi_long arg6, abi_long arg7,
13127                     abi_long arg8)
13128 {
13129     CPUState *cpu = env_cpu(cpu_env);
13130     abi_long ret;
13131 
13132 #ifdef DEBUG_ERESTARTSYS
13133     /* Debug-only code for exercising the syscall-restart code paths
13134      * in the per-architecture cpu main loops: restart every syscall
13135      * the guest makes once before letting it through.
13136      */
13137     {
13138         static bool flag;
13139         flag = !flag;
13140         if (flag) {
13141             return -TARGET_ERESTARTSYS;
13142         }
13143     }
13144 #endif
13145 
13146     record_syscall_start(cpu, num, arg1,
13147                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13148 
13149     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13150         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13151     }
13152 
13153     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13154                       arg5, arg6, arg7, arg8);
13155 
13156     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13157         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13158                           arg3, arg4, arg5, arg6);
13159     }
13160 
13161     record_syscall_return(cpu, num, ret);
13162     return ret;
13163 }
13164